Update app.py
app.py CHANGED
@@ -57,7 +57,7 @@ def _translate(text: str, src_lang: str, tgt_lang: str):
     for paragraph in paragraphs:
         translated_sentences = []
         input_tokens = (
-            tokenizer("Translate to Chinese
+            tokenizer("Translate to Chinese. Direct output translation result without any explaination::\n\n" + paragraph, return_tensors="pt")
             .input_ids[0]
             .cpu()
             .numpy()
@@ -65,7 +65,7 @@ def _translate(text: str, src_lang: str, tgt_lang: str):
         )
         translated_chunk = model.generate(
             input_ids=torch.tensor([input_tokens]).to(device),
-            max_length=len(input_tokens) +
+            max_length=len(input_tokens) + 1000,
             num_return_sequences=1,
         )
         print(translated_chunk)
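
The commit rewrites the prompt passed to tokenizer so the model is asked to output only the translation, and bounds model.generate with max_length = len(input_tokens) + 1000. Below is a minimal sketch of how the updated loop might run end to end; the checkpoint name, device setup, sample paragraph, and final decode step are placeholder assumptions for illustration and are not taken from this Space's app.py.

# Minimal sketch of the updated translation loop.
# The checkpoint name, device handling, sample paragraph, and decode step
# below are placeholders for illustration, not values from this Space.
import torch
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

device = "cuda" if torch.cuda.is_available() else "cpu"
tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-small")  # placeholder checkpoint
model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-small").to(device)

paragraphs = ["Hello, world. This is a short test paragraph."]  # placeholder input

for paragraph in paragraphs:
    translated_sentences = []
    input_tokens = (
        tokenizer(
            "Translate to Chinese. Direct output translation result"
            " without any explaination::\n\n" + paragraph,
            return_tensors="pt",
        )
        .input_ids[0]
        .cpu()
        .numpy()
    )
    translated_chunk = model.generate(
        input_ids=torch.tensor([input_tokens]).to(device),
        # New in this commit: cap output length relative to the prompt length.
        max_length=len(input_tokens) + 1000,
        num_return_sequences=1,
    )
    # The commit keeps the debug print of the raw generated token IDs.
    print(translated_chunk)
    # Decoding shown here for illustration only; not part of the diff.
    print(tokenizer.decode(translated_chunk[0], skip_special_tokens=True))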