Skip to content

Commit

Permalink
Merge pull request #11 from github/update_codespace
Browse files Browse the repository at this point in the history
Update Codespace with latest changes.
  • Loading branch information
brannon authored Aug 22, 2024
2 parents 2268ec2 + 9b849fa commit bb1756e
Show file tree
Hide file tree
Showing 57 changed files with 253 additions and 135 deletions.
13 changes: 13 additions & 0 deletions .devcontainer/Dockerfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
# Dev-container base image for the Codespace.
# VARIANT selects the Ubuntu codename of the buildpack-deps base image;
# it can be overridden at build time with --build-arg VARIANT=<codename>.
ARG VARIANT="focal"
FROM buildpack-deps:${VARIANT}-curl

# Marks this image as providing the "common" dev-container feature set.
LABEL dev.containers.features="common"

# Stage the first-run notice into a temporary location inside the image.
COPY first-run-notice.txt /tmp/scripts/

# Move first run notice to right spot
# (the path VS Code / Codespaces reads welcome notices from)
RUN mkdir -p "/usr/local/etc/vscode-dev-containers/" \
&& mv -f /tmp/scripts/first-run-notice.txt /usr/local/etc/vscode-dev-containers/

# Remove scripts now that we're done with them
RUN rm -rf /tmp/scripts
8 changes: 8 additions & 0 deletions .devcontainer/bootstrap
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
#!/bin/bash
# Bootstrap script for the Codespace: installs the repository's Node.js
# and Python dependencies.
#
# -e: exit immediately if any command fails (previously a failed
#     `npm install` would be silently ignored and pip would still run);
# -u: treat references to unset variables as errors;
# -o pipefail: a pipeline fails if any stage fails.
set -euo pipefail

# Absolute path of the repository checkout inside the Codespace container.
ROOT_DIR=/workspaces/codespaces-models

# Install JavaScript dependencies from package.json.
npm install "${ROOT_DIR}"

# Install Python dependencies.
pip install -r "${ROOT_DIR}/requirements.txt"

54 changes: 50 additions & 4 deletions .devcontainer/devcontainer.json
Original file line number Diff line number Diff line change
@@ -1,7 +1,44 @@
{
"image": "mcr.microsoft.com/devcontainers/universal:2",
"updateContentCommand": "npm install; pip install -r requirements.txt",
"postStartCommand": "npm install; pip install -r requirements.txt",
"build": {
"dockerfile": "./Dockerfile",
"context": "."
},
"features": {
"ghcr.io/devcontainers/features/common-utils:2": {
"username": "codespace",
"userUid": "1000",
"userGid": "1000"
},
"ghcr.io/devcontainers/features/node:1": {
"version": "20"
},
"ghcr.io/devcontainers/features/python:1": {
"version": "3.11.9",
"installJupyterLab": "false"
},
"ghcr.io/devcontainers/features/git:1": {
"version": "latest",
"ppa": "false"
},
"ghcr.io/devcontainers/features/git-lfs:1": {
"version": "latest"
},
"ghcr.io/devcontainers/features/github-cli:1": {
"version": "latest"
}
},
"overrideFeatureInstallOrder": [
"ghcr.io/devcontainers/features/common-utils",
"ghcr.io/devcontainers/features/git",
"ghcr.io/devcontainers/features/node",
"ghcr.io/devcontainers/features/python",
"ghcr.io/devcontainers/features/git-lfs",
"ghcr.io/devcontainers/features/github-cli"
],
"remoteUser": "codespace",
"containerUser": "codespace",
"updateContentCommand": "${containerWorkspaceFolder}/.devcontainer/bootstrap",
"postStartCommand": "${containerWorkspaceFolder}/.devcontainer/bootstrap",
"customizations": {
"codespaces": {
"disableAutomaticConfiguration": true,
Expand All @@ -16,10 +53,19 @@
"ms-toolsai.prompty"
],
"settings": {
/*
NOTE: excluding these Python environments causes Jupyter to select the remaining environment by default
The default environment will be: /usr/local/python/current/bin/python
*/
"jupyter.kernels.excludePythonEnvironments": [
"/usr/local/python/current/bin/python3",
"/usr/bin/python3",
"/bin/python3"
],
"workbench.editorAssociations": {
"*.md": "vscode.markdown.preview.editor"
}
}
}
}
}
}
4 changes: 4 additions & 0 deletions .devcontainer/first-run-notice.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
👋 Welcome to your shiny new Codespace for interacting with GitHub Models! We've got everything fired up and ready for you to explore AI Models hosted on Azure AI.

Take a look at the README to find all of the information you need to get started.

4 changes: 3 additions & 1 deletion .gitignore
Original file line number Diff line number Diff line change
@@ -1,4 +1,6 @@
.vscode/
.vscode/*
!.vscode/extensions.json
!.vscode/launch.json
__pycache__/
.env
.DS_Store
Expand Down
5 changes: 5 additions & 0 deletions .vscode/extensions.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
{
"unwantedRecommendations": [
"ms-azuretools.vscode-docker"
]
}
25 changes: 25 additions & 0 deletions .vscode/launch.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
{
"configurations": [
{
"name": "Run JavaScript Sample",
"program": "${file}",
"cwd": "${fileDirname}",
"envFile": "${workspaceFolder}/.env",
"outputCapture": "std",
"request": "launch",
"skipFiles": [
"<node_internals>/**"
],
"type": "node"
},
{
"name": "Run Python Sample",
"program": "${file}",
"cwd": "${fileDirname}",
"envFile": "${workspaceFolder}/.env",
"redirectOutput": false,
"request": "launch",
"type": "debugpy"
}
]
}
4 changes: 2 additions & 2 deletions cookbooks/python/langchain/lc_openai_getting_started.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,7 @@
"os.environ[\"OPENAI_API_KEY\"] = os.getenv(\"GITHUB_TOKEN\")\n",
"os.environ[\"OPENAI_BASE_URL\"] = \"https://models.inference.ai.azure.com/\"\n",
"\n",
"GPT_MODEL = \"gpt-4o\"\n",
"GPT_MODEL = \"gpt-4o-mini\"\n",
"\n",
"llm = ChatOpenAI(model=GPT_MODEL)"
]
Expand Down Expand Up @@ -373,7 +373,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.14"
"version": "3.11.9"
}
},
"nbformat": 4,
Expand Down
6 changes: 3 additions & 3 deletions cookbooks/python/llamaindex/rag_getting_started.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@
"\n",
"To run RAG, you need 2 models: a chat model, and an embedding model. The GitHub Model service offers different options.\n",
"\n",
"For instance you could use an Azure OpenAI chat model (`gpt-4o`) and embedding model (`text-embedding-3-small`), or a Cohere chat model (`Cohere-command-r-plus`) and embedding model (`Cohere-embed-v3-multilingual`).\n",
"For instance you could use an Azure OpenAI chat model (`gpt-4o-mini`) and embedding model (`text-embedding-3-small`), or a Cohere chat model (`Cohere-command-r-plus`) and embedding model (`Cohere-embed-v3-multilingual`).\n",
"\n",
"We'll proceed using some of the Azure OpenAI models below. You can find [how to leverage Cohere models in the LlamaIndex documentation](https://docs.llamaindex.ai/en/stable/examples/llm/cohere/).\n",
"\n",
Expand Down Expand Up @@ -89,7 +89,7 @@
"logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))\n",
"\n",
"llm = OpenAI(\n",
" model=\"gpt-4o\",\n",
" model=\"gpt-4o-mini\",\n",
" api_key=os.getenv(\"OPENAI_API_KEY\"),\n",
" api_base=os.getenv(\"OPENAI_BASE_URL\"),\n",
")\n",
Expand Down Expand Up @@ -258,7 +258,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.14"
"version": "3.11.9"
}
},
"nbformat": 4,
Expand Down
2 changes: 1 addition & 1 deletion cookbooks/python/mistralai/evaluation.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -525,7 +525,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.13"
"version": "3.11.9"
}
},
"nbformat": 4,
Expand Down
2 changes: 1 addition & 1 deletion cookbooks/python/mistralai/function_calling.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -349,7 +349,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.13"
"version": "3.11.9"
}
},
"nbformat": 4,
Expand Down
2 changes: 1 addition & 1 deletion cookbooks/python/mistralai/prefix_use_cases.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -868,7 +868,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.13"
"version": "3.11.9"
}
},
"nbformat": 4,
Expand Down
2 changes: 1 addition & 1 deletion cookbooks/python/mistralai/prompting_capabilities.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -505,7 +505,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.13"
"version": "3.11.9"
}
},
"nbformat": 4,
Expand Down
8 changes: 4 additions & 4 deletions cookbooks/python/openai/Data_extraction_transformation.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -100,7 +100,7 @@
"os.environ[\"OPENAI_API_KEY\"] = os.getenv(\"GITHUB_TOKEN\")\n",
"os.environ[\"OPENAI_BASE_URL\"] = \"https://models.inference.ai.azure.com/\"\n",
"\n",
"GPT_MODEL = \"gpt-4o\"\n",
"GPT_MODEL = \"gpt-4o-mini\"\n",
"\n",
"client = OpenAI()"
]
Expand Down Expand Up @@ -193,7 +193,7 @@
" \"\"\"\n",
" \n",
" response = client.chat.completions.create(\n",
" model=\"gpt-4o\",\n",
" model=\"gpt-4o-mini\",\n",
" response_format={ \"type\": \"json_object\" },\n",
" messages=[\n",
" {\n",
Expand Down Expand Up @@ -491,7 +491,7 @@
" \"\"\"\n",
" \n",
" response = client.chat.completions.create(\n",
" model=\"gpt-4o\",\n",
" model=\"gpt-4o-mini\",\n",
" response_format={ \"type\": \"json_object\" },\n",
" messages=[\n",
" {\n",
Expand Down Expand Up @@ -813,7 +813,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.13"
"version": "3.11.9"
}
},
"nbformat": 4,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,7 @@
"os.environ[\"OPENAI_API_KEY\"] = os.getenv(\"GITHUB_TOKEN\")\n",
"os.environ[\"OPENAI_BASE_URL\"] = \"https://models.inference.ai.azure.com/\"\n",
"\n",
"GPT_MODEL = \"gpt-4o\"\n",
"GPT_MODEL = \"gpt-4o-mini\"\n",
"\n",
"client = OpenAI()"
]
Expand Down Expand Up @@ -179,7 +179,7 @@
" {\"role\": \"assistant\", \"content\": assistant_policy_example_1},\n",
" {\"role\": \"user\", \"content\": input_message},\n",
" ],\n",
" model=\"gpt-4o\"\n",
" model=\"gpt-4o-mini\"\n",
" )\n",
" \n",
" return response.choices[0].message.content\n",
Expand Down Expand Up @@ -378,7 +378,7 @@
" ]\n",
"\n",
" response = client.chat.completions.create(\n",
" model=\"gpt-4o\",\n",
" model=\"gpt-4o-mini\",\n",
" messages=messages,\n",
" temperature=0.7,\n",
" n=10,\n",
Expand Down Expand Up @@ -695,7 +695,7 @@
" ]\n",
"\n",
" response = client.chat.completions.create(\n",
" model=\"gpt-4o\",\n",
" model=\"gpt-4o-mini\",\n",
" messages=messages,\n",
" temperature=0.7,\n",
" n=10\n",
Expand Down Expand Up @@ -844,7 +844,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.13"
"version": "3.11.9"
}
},
"nbformat": 4,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -78,7 +78,7 @@
"os.environ[\"OPENAI_API_KEY\"] = os.getenv(\"GITHUB_TOKEN\")\n",
"os.environ[\"OPENAI_BASE_URL\"] = \"https://models.inference.ai.azure.com/\"\n",
"\n",
"GPT_MODEL = \"gpt-4o\"\n",
"GPT_MODEL = \"gpt-4o-mini\"\n",
"\n",
"client = OpenAI()"
]
Expand Down Expand Up @@ -451,7 +451,7 @@
"source": [
"### Parallel Function Calling\n",
"\n",
"Newer models such as gpt-4o or gpt-3.5-turbo can call multiple functions in one turn."
"Newer models such as gpt-4o-mini or gpt-3.5-turbo can call multiple functions in one turn."
]
},
{
Expand Down Expand Up @@ -700,7 +700,7 @@
"}]\n",
"\n",
"response = client.chat.completions.create(\n",
" model='gpt-4o', \n",
" model='gpt-4o-mini', \n",
" messages=messages, \n",
" tools= tools, \n",
" tool_choice=\"auto\"\n",
Expand Down Expand Up @@ -747,7 +747,7 @@
" # Step 4: Invoke the chat completions API with the function response appended to the messages list\n",
" # Note that messages with role 'tool' must be a response to a preceding message with 'tool_calls'\n",
" model_response_with_function_call = client.chat.completions.create(\n",
" model=\"gpt-4o\",\n",
" model=\"gpt-4o-mini\",\n",
" messages=messages,\n",
" ) # get a new response from the model where it can see the function response\n",
" print(model_response_with_function_call.choices[0].message.content)\n",
Expand All @@ -766,7 +766,7 @@
"source": [
"## Next Steps\n",
"\n",
"See our other notebook [Data extraction and transformation](Data_extraction_transformation.ipynb) which shows how to extract data from documents using gpt-4o."
"See our other notebook [Data extraction and transformation](Data_extraction_transformation.ipynb) which shows how to extract data from documents using gpt-4o-mini."
]
}
],
Expand All @@ -786,7 +786,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.13"
"version": "3.11.9"
}
},
"nbformat": 4,
Expand Down
6 changes: 3 additions & 3 deletions cookbooks/python/openai/How_to_stream_completions.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,7 @@
"os.environ[\"OPENAI_API_KEY\"] = os.getenv(\"GITHUB_TOKEN\")\n",
"os.environ[\"OPENAI_BASE_URL\"] = \"https://models.inference.ai.azure.com/\"\n",
"\n",
"GPT_MODEL = \"gpt-4o\"\n",
"GPT_MODEL = \"gpt-4o-mini\"\n",
"\n",
"client = OpenAI()"
]
Expand Down Expand Up @@ -169,7 +169,7 @@
"source": [
"### 3. How much time is saved by streaming a chat completion\n",
"\n",
"Now let's ask `gpt-4o` to count to 100 again, and see how long it takes."
"Now let's ask `gpt-4o-mini` to count to 100 again, and see how long it takes."
]
},
{
Expand Down Expand Up @@ -255,7 +255,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.13"
"version": "3.11.9"
},
"orig_nbformat": 4,
"vscode": {
Expand Down
2 changes: 1 addition & 1 deletion cookbooks/python/openai/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,6 @@ The samples were modified slightly to better run with the GitHub Models service.

- [How to process image and video with GPT-4](how_to_process_image_and_video_with_gpt4o.ipynb): This notebook shows how to process images and videos with GPT-4.
- [How to call functions with chat models](How_to_call_functions_with_chat_models.ipynb): This notebook shows how to get GPT-4o to determine which of a set of functions to call to answer a user's question.
- [Data extraction and transformation](Data_extraction_transformation.ipynb): This notebook shows how to extract data from documents using gpt-4o.
- [Data extraction and transformation](Data_extraction_transformation.ipynb): This notebook shows how to extract data from documents using gpt-4o-mini.
- [How to stream completions](How_to_stream_completions.ipynb): This notebook shows detailed instructions on how to stream chat completions.
- [Developing Hallucination Guardrails](Developing_hallucination_guardrails.ipynb): Develop an output guardrail that specifically checks model outputs for hallucinations
Loading

0 comments on commit bb1756e

Please sign in to comment.