---
license: cc-by-nc-4.0
language:
- ro
base_model:
- OpenLLM-Ro/RoLlama2-7b-Instruct-2024-10-09
datasets:
- OpenLLM-Ro/ro_dpo_helpsteer
model-index:
- name: OpenLLM-Ro/RoLlama2-7b-Instruct-DPO-2024-10-09
  results:
  - task:
      type: text-generation
    dataset:
      name: RoMT-Bench
      type: RoMT-Bench
    metrics:
    - name: Score
      type: Score
      value: 4.61
  - task:
      type: text-generation
    dataset:
      name: RoCulturaBench
      type: RoCulturaBench
    metrics:
    - name: Score
      type: Score
      value: 4.80
  - task:
      type: text-generation
    dataset:
      name: Romanian_Academic_Benchmarks
      type: Romanian_Academic_Benchmarks
    metrics:
    - name: Average accuracy
      type: accuracy
      value: 43.20
  - task:
      type: text-generation
    dataset:
      name: OpenLLM-Ro/ro_arc_challenge
      type: OpenLLM-Ro/ro_arc_challenge
    metrics:
    - name: Average accuracy
      type: accuracy
      value: 44.24
  - task:
      type: text-generation
    dataset:
      name: OpenLLM-Ro/ro_mmlu
      type: OpenLLM-Ro/ro_mmlu
    metrics:
    - name: Average accuracy
      type: accuracy
      value: 38.39
  - task:
      type: text-generation
    dataset:
      name: OpenLLM-Ro/ro_winogrande
      type: OpenLLM-Ro/ro_winogrande
    metrics:
    - name: Average accuracy
      type: accuracy
      value: 62.57
  - task:
      type: text-generation
    dataset:
      name: OpenLLM-Ro/ro_hellaswag
      type: OpenLLM-Ro/ro_hellaswag
    metrics:
    - name: Average accuracy
      type: accuracy
      value: 59.20
  - task:
      type: text-generation
    dataset:
      name: OpenLLM-Ro/ro_gsm8k
      type: OpenLLM-Ro/ro_gsm8k
    metrics:
    - name: Average accuracy
      type: accuracy
      value: 15.72
  - task:
      type: text-generation
    dataset:
      name: OpenLLM-Ro/ro_truthfulqa
      type: OpenLLM-Ro/ro_truthfulqa
    metrics:
    - name: Average accuracy
      type: accuracy
      value: 39.07
  - task:
      type: text-generation
    dataset:
      name: LaRoSeDa_binary
      type: LaRoSeDa_binary
    metrics:
    - name: Average macro-f1
      type: macro-f1
      value: 97.31
  - task:
      type: text-generation
    dataset:
      name: LaRoSeDa_multiclass
      type: LaRoSeDa_multiclass
    metrics:
    - name: Average macro-f1
      type: macro-f1
      value: 60.56
  - task:
      type: text-generation
    dataset:
      name: LaRoSeDa_binary_finetuned
      type: LaRoSeDa_binary_finetuned
    metrics:
    - name: Average macro-f1
      type: macro-f1
      value: 0.00
  - task:
      type: text-generation
    dataset:
      name: LaRoSeDa_multiclass_finetuned
      type: LaRoSeDa_multiclass_finetuned
    metrics:
    - name: Average macro-f1
      type: macro-f1
      value: 0.00
  - task:
      type: text-generation
    dataset:
      name: WMT_EN-RO
      type: WMT_EN-RO
    metrics:
    - name: Average bleu
      type: bleu
      value: 26.56
  - task:
      type: text-generation
    dataset:
      name: WMT_RO-EN
      type: WMT_RO-EN
    metrics:
    - name: Average bleu
      type: bleu
      value: 21.68
  - task:
      type: text-generation
    dataset:
      name: WMT_EN-RO_finetuned
      type: WMT_EN-RO_finetuned
    metrics:
    - name: Average bleu
      type: bleu
      value: 0.00
  - task:
      type: text-generation
    dataset:
      name: WMT_RO-EN_finetuned
      type: WMT_RO-EN_finetuned
    metrics:
    - name: Average bleu
      type: bleu
      value: 0.00
  - task:
      type: text-generation
    dataset:
      name: XQuAD
      type: XQuAD
    metrics:
    - name: Average exact_match
      type: exact_match
      value: 35.78
  - task:
      type: text-generation
    dataset:
      name: XQuAD
      type: XQuAD
    metrics:
    - name: Average f1
      type: f1
      value: 59.31
  - task:
      type: text-generation
    dataset:
      name: XQuAD_finetuned
      type: XQuAD_finetuned
    metrics:
    - name: Average exact_match
      type: exact_match
      value: 0.00
  - task:
      type: text-generation
    dataset:
      name: XQuAD_finetuned
      type: XQuAD_finetuned
    metrics:
    - name: Average f1
      type: f1
      value: 0.00
  - task:
      type: text-generation
    dataset:
      name: STS
      type: STS
    metrics:
    - name: Average spearman
      type: spearman
      value: 61.22
  - task:
      type: text-generation
    dataset:
      name: STS
      type: STS
    metrics:
    - name: Average pearson
      type: pearson
      value: 58.41
  - task:
      type: text-generation
    dataset:
      name: STS_finetuned
      type: STS_finetuned
    metrics:
    - name: Average spearman
      type: spearman
      value: 0.00
  - task:
      type: text-generation
    dataset:
      name: STS_finetuned
      type: STS_finetuned
    metrics:
    - name: Average pearson
      type: pearson
      value: 0.00
  - task:
      type: text-generation
    dataset:
      name: RoMT-Bench
      type: RoMT-Bench
    metrics:
    - name: First turn
      type: Score
      value: 5.15
    - name: Second turn
      type: Score
      value: 4.06
  - task:
      type: text-generation
    dataset:
      name: OpenLLM-Ro/ro_arc_challenge
      type: OpenLLM-Ro/ro_arc_challenge
    metrics:
    - name: 0-shot
      type: accuracy
      value: 42.67
    - name: 1-shot
      type: accuracy
      value: 43.36
    - name: 3-shot
      type: accuracy
      value: 44.13
    - name: 5-shot
      type: accuracy
      value: 44.30
    - name: 10-shot
      type: accuracy
      value: 45.67
    - name: 25-shot
      type: accuracy
      value: 45.33
  - task:
      type: text-generation
    dataset:
      name: OpenLLM-Ro/ro_mmlu
      type: OpenLLM-Ro/ro_mmlu
    metrics:
    - name: 0-shot
      type: accuracy
      value: 36.62
    - name: 1-shot
      type: accuracy
      value: 38.04
    - name: 3-shot
      type: accuracy
      value: 39.52
    - name: 5-shot
      type: accuracy
      value: 39.36
  - task:
      type: text-generation
    dataset:
      name: OpenLLM-Ro/ro_winogrande
      type: OpenLLM-Ro/ro_winogrande
    metrics:
    - name: 0-shot
      type: accuracy
      value: 61.72
    - name: 1-shot
      type: accuracy
      value: 62.04
    - name: 3-shot
      type: accuracy
      value: 63.85
    - name: 5-shot
      type: accuracy
      value: 62.67
  - task:
      type: text-generation
    dataset:
      name: OpenLLM-Ro/ro_hellaswag
      type: OpenLLM-Ro/ro_hellaswag
    metrics:
    - name: 0-shot
      type: accuracy
      value: 58.75
    - name: 1-shot
      type: accuracy
      value: 58.29
    - name: 3-shot
      type: accuracy
      value: 59.28
    - name: 5-shot
      type: accuracy
      value: 59.68
    - name: 10-shot
      type: accuracy
      value: 60.01
  - task:
      type: text-generation
    dataset:
      name: OpenLLM-Ro/ro_gsm8k
      type: OpenLLM-Ro/ro_gsm8k
    metrics:
    - name: 1-shot
      type: accuracy
      value: 11.14
    - name: 3-shot
      type: accuracy
      value: 17.97
    - name: 5-shot
      type: accuracy
      value: 18.04
  - task:
      type: text-generation
    dataset:
      name: LaRoSeDa_binary
      type: LaRoSeDa_binary
    metrics:
    - name: 0-shot
      type: macro-f1
      value: 98.03
    - name: 1-shot
      type: macro-f1
      value: 95.96
    - name: 3-shot
      type: macro-f1
      value: 97.33
    - name: 5-shot
      type: macro-f1
      value: 97.90
  - task:
      type: text-generation
    dataset:
      name: LaRoSeDa_multiclass
      type: LaRoSeDa_multiclass
    metrics:
    - name: 0-shot
      type: macro-f1
      value: 60.67
    - name: 1-shot
      type: macro-f1
      value: 51.37
    - name: 3-shot
      type: macro-f1
      value: 62.49
    - name: 5-shot
      type: macro-f1
      value: 67.70
  - task:
      type: text-generation
    dataset:
      name: WMT_EN-RO
      type: WMT_EN-RO
    metrics:
    - name: 0-shot
      type: bleu
      value: 19.83
    - name: 1-shot
      type: bleu
      value: 29.04
    - name: 3-shot
      type: bleu
      value: 28.90
    - name: 5-shot
      type: bleu
      value: 28.47
  - task:
      type: text-generation
    dataset:
      name: WMT_RO-EN
      type: WMT_RO-EN
    metrics:
    - name: 0-shot
      type: bleu
      value: 1.74
    - name: 1-shot
      type: bleu
      value: 15.28
    - name: 3-shot
      type: bleu
      value: 34.13
    - name: 5-shot
      type: bleu
      value: 35.56
  - task:
      type: text-generation
    dataset:
      name: XQuAD_EM
      type: XQuAD_EM
    metrics:
    - name: 0-shot
      type: exact_match
      value: 26.97
    - name: 1-shot
      type: exact_match
      value: 36.30
    - name: 3-shot
      type: exact_match
      value: 40.25
    - name: 5-shot
      type: exact_match
      value: 39.58
  - task:
      type: text-generation
    dataset:
      name: XQuAD_F1
      type: XQuAD_F1
    metrics:
    - name: 0-shot
      type: f1
      value: 52.90
    - name: 1-shot
      type: f1
      value: 60.05
    - name: 3-shot
      type: f1
      value: 62.08
    - name: 5-shot
      type: f1
      value: 62.22
  - task:
      type: text-generation
    dataset:
      name: STS_Spearman
      type: STS_Spearman
    metrics:
    - name: 1-shot
      type: spearman
      value: 62.07
    - name: 3-shot
      type: spearman
      value: 59.47
    - name: 5-shot
      type: spearman
      value: 62.12
  - task:
      type: text-generation
    dataset:
      name: STS_Pearson
      type: STS_Pearson
    metrics:
    - name: 1-shot
      type: pearson
      value: 60.60
    - name: 3-shot
      type: pearson
      value: 56.44
    - name: 5-shot
      type: pearson
      value: 58.18
---

# Model Card for RoLlama2-7b-Instruct-DPO-2024-10-09

RoLlama2 is a family of pretrained and fine-tuned generative text models for Romanian. This is the repository for the **human-aligned instruct 7B model**. Links to the other models can be found at the bottom of this page.

## Model Details

### Model Description

OpenLLM-Ro represents the first open-source effort to build an LLM specialized for Romanian. OpenLLM-Ro has developed and publicly releases a collection of Romanian LLMs, both as foundational models and as instruct and chat variants.

- **Developed by:** OpenLLM-Ro
- **Language(s):** Romanian
- **License:** cc-by-nc-4.0
- **Finetuned from model:** [RoLlama2-7b-Instruct-2024-10-09](https://huggingface.co/OpenLLM-Ro/RoLlama2-7b-Instruct-2024-10-09)
- **Trained using:** [RoHelpSteer](https://huggingface.co/datasets/OpenLLM-Ro/ro_dpo_helpsteer) (see the quick data inspection below)
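
To get a feel for the preference data used for alignment, the dataset can be loaded directly with the `datasets` library. This is a minimal sketch: the `"train"` split name and the exact column schema are assumptions, so the snippet prints the column names rather than relying on them.

```python
from datasets import load_dataset

# Minimal sketch for peeking at the DPO preference data.
# The "train" split name is an assumption; check the dataset card for the
# actual splits and columns (typically prompt/chosen/rejected-style fields).
ds = load_dataset("OpenLLM-Ro/ro_dpo_helpsteer", split="train")
print(ds.column_names)
print(ds[0])
```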

### Model Sources

- **Repository:** https://github.com/OpenLLM-Ro/LLaMA-Factory
- **Paper:** https://arxiv.org/abs/2406.18266

## Intended Use

### Intended Use Cases

RoLlama2 is intended for research use in Romanian. Base models can be adapted for a variety of natural language tasks, while instruction- and chat-tuned models are intended for assistant-like chat.

### Out-of-Scope Use

Use in any manner that violates the license or any applicable laws or regulations, and use in languages other than Romanian.

## How to Get Started with the Model

Use the code below to get started with the model.

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the tokenizer and the DPO-aligned model from the Hugging Face Hub.
tokenizer = AutoTokenizer.from_pretrained("OpenLLM-Ro/RoLlama2-7b-Instruct-DPO-2024-10-09")
model = AutoModelForCausalLM.from_pretrained("OpenLLM-Ro/RoLlama2-7b-Instruct-DPO-2024-10-09")

instruction = "Care este cel mai înalt vârf muntos din România?"
chat = [
    {"role": "system", "content": "Ești un asistent folositor, respectuos și onest. Încearcă să ajuți cât mai mult prin informațiile oferite, excluzând răspunsuri toxice, rasiste, sexiste, periculoase și ilegale."},
    {"role": "user", "content": instruction},
]
# Render the conversation with the model's built-in chat template, then tokenize.
prompt = tokenizer.apply_chat_template(chat, tokenize=False)

inputs = tokenizer.encode(prompt, add_special_tokens=False, return_tensors="pt")
# Greedy decoding by default; adjust the generation parameters as needed.
outputs = model.generate(input_ids=inputs, max_new_tokens=128)
print(tokenizer.decode(outputs[0]))
```
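
For GPUs with limited memory, the 7B model can also be loaded with 4-bit quantization through `bitsandbytes`. This is a hedged sketch rather than an official recipe; quantization slightly changes model outputs, so the benchmark numbers below will not be reproduced exactly.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

# Sketch: 4-bit quantized loading (requires the bitsandbytes package).
# The compute dtype is an illustrative choice, not a setting tested by the authors.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)
tokenizer = AutoTokenizer.from_pretrained("OpenLLM-Ro/RoLlama2-7b-Instruct-DPO-2024-10-09")
model = AutoModelForCausalLM.from_pretrained(
    "OpenLLM-Ro/RoLlama2-7b-Instruct-DPO-2024-10-09",
    quantization_config=bnb_config,
    device_map="auto",  # place layers on available devices automatically
)
```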

## Academic Benchmarks

<table>
<tbody>
<tr>
<td><strong>Model</strong></td>
<td><strong><center>Average</center></strong></td>
<td><strong><center>ARC</center></strong></td>
<td><strong><center>MMLU</center></strong></td>
<td><strong><center>Winogrande</center></strong></td>
<td><strong><center>Hellaswag</center></strong></td>
<td><strong><center>GSM8k</center></strong></td>
<td><strong><center>TruthfulQA</center></strong></td>
</tr>
<tr>
<td>Llama-2-7b-chat</td><td><center>36.84</center></td><td><center>37.03</center></td><td><center>33.80</center></td><td><center>55.87</center></td><td><center>45.36</center></td><td><center>4.90</center></td><td><center>44.09</center></td>
</tr>
<tr>
<td>RoLlama2-7b-Instruct-2024-05-14</td><td><center><strong>45.71</strong></center></td><td><center>43.66</center></td><td><center>39.70</center></td><td><center><strong>70.34</strong></center></td><td><center>57.36</center></td><td><center><strong>18.78</strong></center></td><td><center>44.44</center></td>
</tr>
<tr>
<td>RoLlama2-7b-Instruct-2024-10-09</td><td><center>44.50</center></td><td><center><strong>44.73</strong></center></td><td><center><strong>40.39</strong></center></td><td><center>63.67</center></td><td><center>59.12</center></td><td><center>13.29</center></td><td><center><strong>45.78</strong></center></td>
</tr>
<tr>
<td><em>RoLlama2-7b-Instruct-DPO-2024-10-09</em></td><td><center><em>43.20</em></center></td><td><center><em>44.24</em></center></td><td><center><em>38.39</em></center></td><td><center><em>62.57</em></center></td><td><center><em><strong>59.20</strong></em></center></td><td><center><em>15.72</em></center></td><td><center><em>39.07</em></center></td>
</tr>
</tbody>
</table>
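
The Average column is the unweighted mean of the six benchmark scores. A quick sanity check for the DPO model, using the values reported above:

```python
# The reported Average for RoLlama2-7b-Instruct-DPO-2024-10-09 is the mean
# of its six academic benchmark scores.
scores = {
    "ARC": 44.24, "MMLU": 38.39, "Winogrande": 62.57,
    "Hellaswag": 59.20, "GSM8k": 15.72, "TruthfulQA": 39.07,
}
print(f"{sum(scores.values()) / len(scores):.2f}")  # 43.20
```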

## Downstream Tasks

<table>
<tbody>
<tr>
<td></td>
<td colspan="4"><center><strong>LaRoSeDa</strong></center></td>
<td colspan="4"><center><strong>WMT</strong></center></td>
</tr>
<tr>
<td></td>
<td colspan="2"><center><strong>Few-shot</strong></center></td>
<td colspan="2"><center><strong>Finetuned</strong></center></td>
<td colspan="2"><center><strong>Few-shot</strong></center></td>
<td colspan="2"><center><strong>Finetuned</strong></center></td>
</tr>
<tr>
<td><strong>Model</strong></td>
<td><center><strong>Binary<br>(Macro F1)</strong></center></td>
<td><center><strong>Multiclass<br>(Macro F1)</strong></center></td>
<td><center><strong>Binary<br>(Macro F1)</strong></center></td>
<td><center><strong>Multiclass<br>(Macro F1)</strong></center></td>
<td><center><strong>EN-RO<br>(BLEU)</strong></center></td>
<td><center><strong>RO-EN<br>(BLEU)</strong></center></td>
<td><center><strong>EN-RO<br>(BLEU)</strong></center></td>
<td><center><strong>RO-EN<br>(BLEU)</strong></center></td>
</tr>
<tr>
<td>Llama-2-7b-chat</td><td><center>87.78</center></td><td><center>52.81</center></td><td><center>97.27</center></td><td><center>82.02</center></td><td><center>15.55</center></td><td><center><strong>28.53</strong></center></td><td><center>19.99</center></td><td><center>31.48</center></td>
</tr>
<tr>
<td>RoLlama2-7b-Instruct-2024-05-14</td><td><center>97.48</center></td><td><center><strong>65.26</strong></center></td><td><center><strong>98.83</strong></center></td><td><center><strong>87.28</strong></center></td><td><center><strong>27.38</strong></center></td><td><center>10.32</center></td><td><center>27.59</center></td><td><center><strong>40.13</strong></center></td>
</tr>
<tr>
<td>RoLlama2-7b-Instruct-2024-10-09</td><td><center><strong>97.66</strong></center></td><td><center>62.41</center></td><td><center>97.97</center></td><td><center>60.89</center></td><td><center>27.13</center></td><td><center>19.39</center></td><td><center><strong>27.63</strong></center></td><td><center>39.75</center></td>
</tr>
<tr>
<td><em>RoLlama2-7b-Instruct-DPO-2024-10-09</em></td><td><center><em>97.31</em></center></td><td><center><em>60.56</em></center></td><td><center><em>-</em></center></td><td><center><em>-</em></center></td><td><center><em>26.56</em></center></td><td><center><em>21.68</em></center></td><td><center><em>-</em></center></td><td><center><em>-</em></center></td>
</tr>
</tbody>
</table>

<table>
<tbody>
<tr>
<td></td>
<td colspan="4"><center><strong>XQuAD</strong></center></td>
<td colspan="4"><center><strong>STS</strong></center></td>
</tr>
<tr>
<td></td>
<td colspan="2"><center><strong>Few-shot</strong></center></td>
<td colspan="2"><center><strong>Finetuned</strong></center></td>
<td colspan="2"><center><strong>Few-shot</strong></center></td>
<td colspan="2"><center><strong>Finetuned</strong></center></td>
</tr>
<tr>
<td><strong>Model</strong></td>
<td><center><strong>(EM)</strong></center></td>
<td><center><strong>(F1)</strong></center></td>
<td><center><strong>(EM)</strong></center></td>
<td><center><strong>(F1)</strong></center></td>
<td><center><strong>(Spearman)</strong></center></td>
<td><center><strong>(Pearson)</strong></center></td>
<td><center><strong>(Spearman)</strong></center></td>
<td><center><strong>(Pearson)</strong></center></td>
</tr>
<tr>
<td>Llama-2-7b-chat</td><td><center>32.35</center></td><td><center>54.00</center></td><td><center><strong>60.34</strong></center></td><td><center><strong>75.98</strong></center></td><td><center>32.56</center></td><td><center>31.99</center></td><td><center>74.08</center></td><td><center>72.64</center></td>
</tr>
<tr>
<td>RoLlama2-7b-Instruct-2024-05-14</td><td><center>44.52</center></td><td><center>64.75</center></td><td><center>54.96</center></td><td><center>70.20</center></td><td><center><strong>65.50</strong></center></td><td><center><strong>67.79</strong></center></td><td><center>84.44</center></td><td><center>84.76</center></td>
</tr>
<tr>
<td>RoLlama2-7b-Instruct-2024-10-09</td><td><center><strong>45.71</strong></center></td><td><center><strong>65.08</strong></center></td><td><center>59.24</center></td><td><center>74.25</center></td><td><center>59.69</center></td><td><center>57.16</center></td><td><center><strong>84.66</strong></center></td><td><center><strong>85.07</strong></center></td>
</tr>
<tr>
<td><em>RoLlama2-7b-Instruct-DPO-2024-10-09</em></td><td><center><em>35.78</em></center></td><td><center><em>59.31</em></center></td><td><center><em>-</em></center></td><td><center><em>-</em></center></td><td><center><em>61.22</em></center></td><td><center><em>58.41</em></center></td><td><center><em>-</em></center></td><td><center><em>-</em></center></td>
</tr>
</tbody>
</table>
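
The few-shot numbers in the two tables above are averages over the individual shot settings listed in this card's metadata (0-, 1-, 3-, and 5-shot for most tasks; some tasks use additional shot counts). For example, for WMT EN-RO with the DPO model:

```python
# The reported few-shot BLEU is the mean over the per-shot results for
# RoLlama2-7b-Instruct-DPO-2024-10-09 (0-, 1-, 3-, and 5-shot).
en_ro_bleu = [19.83, 29.04, 28.90, 28.47]
print(f"{sum(en_ro_bleu) / len(en_ro_bleu):.2f}")  # 26.56
```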

## Romanian MT-Bench

<table>
<tbody>
<tr>
<td><strong>Model</strong></td>
<td><strong><center>Average</center></strong></td>
<td><strong><center>1st turn</center></strong></td>
<td><strong><center>2nd turn</center></strong></td>
<td><strong><center>Answers in Ro</center></strong></td>
</tr>
<tr>
<td>Llama-2-7b-chat</td><td><center>1.08</center></td><td><center>1.44</center></td><td><center>0.73</center></td><td><center>45/160</center></td>
</tr>
<tr>
<td>RoLlama2-7b-Instruct-2024-05-14</td><td><center>3.86</center></td><td><center>4.67</center></td><td><center>3.04</center></td><td><center><strong>160/160</strong></center></td>
</tr>
<tr>
<td>RoLlama2-7b-Instruct-2024-10-09</td><td><center>4.43</center></td><td><center>4.92</center></td><td><center>3.94</center></td><td><center><strong>160/160</strong></center></td>
</tr>
<tr>
<td><em>RoLlama2-7b-Instruct-DPO-2024-10-09</em></td><td><center><em><strong>4.61</strong></em></center></td><td><center><em><strong>5.15</strong></em></center></td><td><center><em><strong>4.06</strong></em></center></td><td><center><em><strong>160/160</strong></em></center></td>
</tr>
</tbody>
</table>

## RoCulturaBench

<table>
<tbody>
<tr>
<td><strong>Model</strong></td>
<td><strong><center>Average</center></strong></td>
<td><strong><center>Answers in Ro</center></strong></td>
</tr>
<tr>
<td>Llama-2-7b-chat</td><td><center>1.21</center></td><td><center>33/100</center></td>
</tr>
<tr>
<td>RoLlama2-7b-Instruct-2024-05-14</td><td><center>3.77</center></td><td><center><strong>100/100</strong></center></td>
</tr>
<tr>
<td>RoLlama2-7b-Instruct-2024-10-09</td><td><center>4.08</center></td><td><center><strong>100/100</strong></center></td>
</tr>
<tr>
<td><em>RoLlama2-7b-Instruct-DPO-2024-10-09</em></td><td><center><em><strong>4.80</strong></em></center></td><td><center><em><strong>100/100</strong></em></center></td>
</tr>
</tbody>
</table>

## RoLlama2 Model Family

| Model | Link |
|--------------------|:--------:|
|RoLlama2-7b-Base-2024-05-14 | [link](https://huggingface.co/OpenLLM-Ro/RoLlama2-7b-Base-2024-05-14) |
|RoLlama2-7b-Instruct-2024-05-14 | [link](https://huggingface.co/OpenLLM-Ro/RoLlama2-7b-Instruct-2024-05-14) |
|RoLlama2-7b-Instruct-2024-10-09| [link](https://huggingface.co/OpenLLM-Ro/RoLlama2-7b-Instruct-2024-10-09) |
|*RoLlama2-7b-Instruct-DPO-2024-10-09*| [link](https://huggingface.co/OpenLLM-Ro/RoLlama2-7b-Instruct-DPO-2024-10-09) |

## Citation

```bibtex
@misc{masala2024vorbecstiromanecsterecipetrain,
  title={"Vorbe\c{s}ti Rom\^ane\c{s}te?" A Recipe to Train Powerful Romanian LLMs with English Instructions},
  author={Mihai Masala and Denis C. Ilie-Ablachim and Alexandru Dima and Dragos Corlatescu and Miruna Zavelca and Ovio Olaru and Simina Terian-Dan and Andrei Terian-Dan and Marius Leordeanu and Horia Velicu and Marius Popescu and Mihai Dascalu and Traian Rebedea},
  year={2024},
  eprint={2406.18266},
  archivePrefix={arXiv},
  primaryClass={cs.CL},
  url={https://arxiv.org/abs/2406.18266},
}
```