Skip to content

Commit 504440c

Browse files
authored
Fixed import issue for seq2seq examples (intel#191)
* Fixed import issue for seq2seq examples. Signed-off-by: Cheng, Penghui <[email protected]>
1 parent f3fea7f commit 504440c

File tree

5 files changed

+7
-8
lines changed

5 files changed

+7
-8
lines changed

examples/huggingface/pytorch/language-modeling/inference/run_clm_no_trainer.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -83,7 +83,7 @@
8383
if args.accuracy:
8484
user_model.eval()
8585
def eval_func(user_model):
86-
from intel_extension_for_transformers.evaluation.lm_eval import evaluate
86+
from intel_extension_for_transformers.llm.evaluation.lm_eval import evaluate
8787
results = evaluate(
8888
model="hf-causal",
8989
model_args='pretrained='+args.model+',tokenizer='+args.model+',dtype=float32',

examples/huggingface/pytorch/language-modeling/quantization/run_clm_no_trainer.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -339,7 +339,7 @@ def eval_func(model):
339339

340340
if args.accuracy:
341341
user_model.eval()
342-
from intel_extension_for_transformers.evaluation.lm_eval import evaluate
342+
from intel_extension_for_transformers.llm.evaluation.lm_eval import evaluate
343343
results = evaluate(
344344
model="hf-causal",
345345
model_args='pretrained='+args.model+',tokenizer='+args.model+',dtype=float32',

examples/huggingface/pytorch/text2text-generation/run_seq2seq_generation.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,7 @@
3636
AutoTokenizer,
3737
)
3838

39-
from intel_extension_for_transformers.transformers.modeling import INCModelForSeq2SeqLM
39+
from intel_extension_for_transformers.transformers.modeling.modeling_seq2seq import INCModelForSeq2SeqLM
4040

4141

4242
prompt_texts = ["Translate to German: My name is Arthur",
@@ -431,7 +431,7 @@ def decoder_calib_func(prepared_model):
431431
print("Throughput: {} tokens/sec".format(throughput))
432432

433433
if args.accuracy:
434-
from intel_extension_for_transformers.evaluation.hf_eval import summarization_evaluate
434+
from intel_extension_for_transformers.llm.evaluation.hf_eval import summarization_evaluate
435435
results = summarization_evaluate(
436436
model=model,
437437
tokenizer_name=args.model_name_or_path,

examples/huggingface/pytorch/text2text-generation/run_seq2seq_generation_bart.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,7 @@
3636
AutoTokenizer,
3737
)
3838

39-
from intel_extension_for_transformers.transformers.modeling import INCModelForSeq2SeqLM
39+
from intel_extension_for_transformers.transformers.modeling.modeling_seq2seq import INCModelForSeq2SeqLM
4040

4141

4242
prompt_texts = ["Translate to German: My name is Arthur",
@@ -430,7 +430,7 @@ def decoder_calib_func(prepared_model):
430430
print("Throughput: {} tokens/sec".format(throughput))
431431

432432
if args.accuracy:
433-
from intel_extension_for_transformers.evaluation.hf_eval import summarization_evaluate
433+
from intel_extension_for_transformers.llm.evaluation.hf_eval import summarization_evaluate
434434
results = summarization_evaluate(
435435
model=model,
436436
tokenizer_name=args.model_name_or_path,

workflows/chatbot/fine_tuning/instruction_tuning_pipeline/finetune_clm.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -683,8 +683,7 @@ def concatenate_data(dataset, max_seq_length):
683683

684684
if finetune_args.do_lm_eval and finetune_args.task != "summarization":
685685
unwrapped_model.eval()
686-
from intel_extension_for_transformers.evaluation.lm_eval import evaluate
687-
686+
from intel_extension_for_transformers.llm.evaluation.lm_eval import evaluate
688687
with training_args.main_process_first(desc="lm_eval"):
689688
if is_main_process(training_args.local_rank):
690689
with torch.no_grad():

0 commit comments

Comments (0)