Update README.md
Browse files
README.md
CHANGED
@@ -66,9 +66,10 @@ The model can be integrated into larger systems or applications that require Eng
|
|
66 |
|
67 |
Use the code below to get started with the model:
|
68 |
|
|
|
69 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
70 |
|
71 |
-
#
|
72 |
tokenizer = AutoTokenizer.from_pretrained("Satwik11/gemma-2b-mt-Hindi-Fintuned")
|
73 |
model = AutoModelForCausalLM.from_pretrained("Satwik11/gemma-2b-mt-Hindi-Fintuned")
|
74 |
|
@@ -84,7 +85,7 @@ def generate_translation(prompt, max_length=90):
|
|
84 |
|
85 |
return translated_text
|
86 |
|
87 |
-
#
|
88 |
test_sentences = [
|
89 |
"Today is August 19. The maximum temperature is 70 degrees Fahrenheit"
|
90 |
]
|
@@ -94,7 +95,7 @@ for sentence in test_sentences:
|
|
94 |
translation = generate_translation(prompt)
|
95 |
print(translation)
|
96 |
|
97 |
-
|
98 |
## Training Details
|
99 |
|
100 |
### Training Data
|
|
|
66 |
|
67 |
Use the code below to get started with the model:
|
68 |
|
69 |
+
----------------------------------------------------------------------------------------
|
70 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
71 |
|
72 |
+
# Load the model and tokenizer
|
73 |
tokenizer = AutoTokenizer.from_pretrained("Satwik11/gemma-2b-mt-Hindi-Fintuned")
|
74 |
model = AutoModelForCausalLM.from_pretrained("Satwik11/gemma-2b-mt-Hindi-Fintuned")
|
75 |
|
|
|
85 |
|
86 |
return translated_text
|
87 |
|
88 |
+
# Test the model with some example sentences
|
89 |
test_sentences = [
|
90 |
"Today is August 19. The maximum temperature is 70 degrees Fahrenheit"
|
91 |
]
|
|
|
95 |
translation = generate_translation(prompt)
|
96 |
print(translation)
|
97 |
|
98 |
+
-----------------------------------------------------------------------------------------------
|
99 |
## Training Details
|
100 |
|
101 |
### Training Data
|