
Commit

Merge pull request #3 from Chriskuei/patch-1
Update README.md
archersama authored Jul 9, 2024
2 parents 5a91eaa + ca3011a commit 9fee801
Showing 1 changed file with 6 additions and 6 deletions.
12 changes: 6 additions & 6 deletions README.md
@@ -106,7 +106,7 @@ model = YourCustomDEModel(model_name=model_name)
 tasks = get_tasks(tasks=["codetrans-dl"])

 # Initialize evaluation
-evaluation = COIR(tasks=tasksbatch_size=128)
+evaluation = COIR(tasks=tasks,batch_size=128)

 # Run evaluation
 results = evaluation.run(model, output_folder=f"results/{model_name}")
@@ -130,7 +130,7 @@ model = YourCustomDEModel(model_name=model_name)
 tasks = coir.get_tasks(tasks=["codetrans-dl"])

 # Initialize evaluation
-evaluation = COIR(tasks=tasksbatch_size=128)
+evaluation = COIR(tasks=tasks,batch_size=128)

 # Run evaluation
 results = evaluation.run(model, output_folder=f"results/{model_name}")
@@ -221,7 +221,7 @@ model = YourCustomDEModel()
 tasks = coir.get_tasks(tasks=["codetrans-dl"])

 # Initialize evaluation
-evaluation = COIR(tasks=tasksbatch_size=128)
+evaluation = COIR(tasks=tasks,batch_size=128)

 # Run evaluation
 results = evaluation.run(model, output_folder=f"results/{model_name}")
@@ -341,12 +341,12 @@ class APIModel:
 model = APIModel()

 # Get tasks
-#all task ["codetrans-dl","stackoverflow-qa","apps","codefeedback-mt","codefeedback-st","codetrans-contest","synthetic-
-# text2sql","cosq","codesearchnet","codesearchnet-ccr"]
+#all task ["codetrans-dl", "stackoverflow-qa", "apps","codefeedback-mt", "codefeedback-st", "codetrans-contest", "synthetic-
+# text2sql", "cosq", "codesearchnet", "codesearchnet-ccr"]
 tasks = coir.get_tasks(tasks=["codetrans-dl"])

 # Initialize evaluation
-evaluation = COIR(tasks=tasksbatch_size=128)
+evaluation = COIR(tasks=tasks, batch_size=128)

 # Run evaluation
 results = evaluation.run(model, output_folder=f"results/{model_name}")
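The fix is the same in every hunk: without the comma, `COIR(tasks=tasksbatch_size=128)` is a Python SyntaxError (a keyword argument cannot contain a second `=`), so the README snippets would not run as written. Below is a minimal sketch of the corrected quick-start in context; the import paths (`coir.data_loader`, `coir.evaluation`, `coir.models`) and the example model name are assumptions based on the snippets the README shows elsewhere, not part of this diff.

```python
# Hypothetical end-to-end sketch of the corrected README snippet.
# Import paths are assumed from the README's other examples; adjust them
# to match the installed coir version if they differ.
import coir
from coir.data_loader import get_tasks      # assumed import path
from coir.evaluation import COIR            # assumed import path
from coir.models import YourCustomDEModel   # assumed import path

# Example dense-encoder checkpoint; any embedding model name would do here.
model_name = "intfloat/e5-base-v2"
model = YourCustomDEModel(model_name=model_name)

# Load one benchmark task; the full task list appears in the comment above.
tasks = get_tasks(tasks=["codetrans-dl"])

# The change this commit makes: `tasks` and `batch_size` must be separate
# keyword arguments, i.e. COIR(tasks=tasks, batch_size=128),
# not COIR(tasks=tasksbatch_size=128).
evaluation = COIR(tasks=tasks, batch_size=128)

# Run the retrieval evaluation and write metrics under results/<model_name>.
results = evaluation.run(model, output_folder=f"results/{model_name}")
print(results)
```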
