projects: OpenAPI Utopia (#2556)

Sam Judelson 2024-05-06 08:48:09 -04:00 committed by GitHub
parent 9741c41356
commit b2a77f06b9
17 changed files with 1005 additions and 0 deletions

.gitignore

@@ -0,0 +1,15 @@
# Generated by Cargo
# will have compiled files and executables
/target/
pkg
# These are backup files generated by rustfmt
**/*.rs.bk
# node e2e test tools and outputs
node_modules/
test-results/
end2end/playwright-report/
playwright/.cache/
.secret_key

Cargo.toml

@@ -0,0 +1,117 @@
[package]
name = "openapi-openai-api-swagger-ui"
version = "0.1.0"
edition = "2021"
[lib]
# give the lib target the name used by the `use openapi_swagger_ui::...` imports in src/main.rs
name = "openapi_swagger_ui"
crate-type = ["cdylib", "rlib"]
[dependencies]
axum = { version = "0.7", optional = true }
console_error_panic_hook = "0.1"
leptos = { version = "0.6", features = ["nightly"] }
leptos_axum = { version = "0.6", optional = true }
leptos_meta = { version = "0.6", features = ["nightly"] }
leptos_router = { version = "0.6", features = ["nightly"] }
tokio = { version = "1", features = ["rt-multi-thread"], optional = true }
tower = { version = "0.4", optional = true }
tower-http = { version = "0.5", features = ["fs"], optional = true }
wasm-bindgen = "=0.2.92"
thiserror = "1"
tracing = { version = "0.1", optional = true }
utoipa = { version = "4.2.0", optional = true, features = ["debug"] }
utoipa-swagger-ui = { version = "6.0.0", optional = true, features = ["axum"] }
http = "1"
serde = "1.0.198"
serde_json = { version = "1.0.116", optional = true }
openai_dive = { version = "0.4.7", optional = true }
reqwest = "0.12.4"
uuid = { version = "1.8.0", features = ["v4"] }
[features]
hydrate = ["leptos/hydrate", "leptos_meta/hydrate", "leptos_router/hydrate"]
ssr = [
"dep:openai_dive",
"dep:serde_json",
"dep:utoipa-swagger-ui",
"dep:utoipa",
"dep:axum",
"dep:tokio",
"dep:tower",
"dep:tower-http",
"dep:leptos_axum",
"leptos/ssr",
"leptos_meta/ssr",
"leptos_router/ssr",
"dep:tracing",
]
# Defines a size-optimized profile for the WASM bundle in release mode
[profile.wasm-release]
inherits = "release"
opt-level = 'z'
lto = true
codegen-units = 1
panic = "abort"
[package.metadata.leptos]
# The name used by wasm-bindgen/cargo-leptos for the JS/WASM bundle. Defaults to the crate name
output-name = "openapi-swagger-ui"
# The site root folder is where cargo-leptos generates all output. WARNING: all content of this folder will be erased on a rebuild. Use it in your server setup.
site-root = "target/site"
# The site-root relative folder where all compiled output (JS, WASM and CSS) is written
# Defaults to pkg
site-pkg-dir = "pkg"
# [Optional] The source CSS file. If it ends with .sass or .scss then it will be compiled by dart-sass into CSS. The CSS is optimized by Lightning CSS before being written to <site-root>/<site-pkg>/app.css
style-file = "style/main.scss"
# Assets source dir. All files found here will be copied and synchronized to site-root.
# The assets-dir cannot have a sub directory with the same name/path as site-pkg-dir.
#
# Optional. Env: LEPTOS_ASSETS_DIR.
assets-dir = "public"
# The IP and port (ex: 127.0.0.1:3000) where the server serves the content. Use it in your server setup.
site-addr = "127.0.0.1:3000"
# The port to use for automatic reload monitoring
reload-port = 3001
# [Optional] Command to use when running end2end tests. It will run in the end2end dir.
# [Windows] for non-WSL use "npx.cmd playwright test"
# This binary name can be checked in Powershell with Get-Command npx
end2end-cmd = "npx playwright test"
end2end-dir = "end2end"
# The browserslist query used for optimizing the CSS.
browserquery = "defaults"
# The environment Leptos will run in, usually either "DEV" or "PROD"
env = "DEV"
# The features to use when compiling the bin target
#
# Optional. Can be over-ridden with the command line parameter --bin-features
bin-features = ["ssr"]
# If the --no-default-features flag should be used when compiling the bin target
#
# Optional. Defaults to false.
bin-default-features = false
# The features to use when compiling the lib target
#
# Optional. Can be over-ridden with the command line parameter --lib-features
lib-features = ["hydrate"]
# If the --no-default-features flag should be used when compiling the lib target
#
# Optional. Defaults to false.
lib-default-features = false
# The profile to use for the lib target when compiling for release
#
# Optional. Defaults to "release".
lib-profile-release = "wasm-release"

LICENSE

@@ -0,0 +1,24 @@
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <https://unlicense.org>

README.md

@@ -0,0 +1,15 @@
# OpenAPI Swagger UI OpenAI GPT
This example shows how to document server functions with an OpenAPI schema generated by Utoipa and serve Swagger UI at the /swagger-ui endpoint. It then goes a step further: the same OpenAPI spec is turned into a function list for OpenAI's chat completion endpoint, so the model can generate JSON arguments that are fed back into our server functions.
The example presents an input box; if you ask for something the API covers, such as saying hello or generating a list of names, the model will call the matching function.
To use the AI part of this project, provide your OpenAI API key in an environment variable when running cargo-leptos:
```sh
OPENAI_API_KEY=my_secret_key cargo leptos serve
```
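Under the hood, the model replies with a tool call naming a server-fn endpoint plus JSON arguments, and the app POSTs those arguments straight back to its own API. Here is a minimal sketch of that round trip, assuming the server above is running on 127.0.0.1:3000 and that `reqwest` and `tokio` are available; the `path`/`args` values are hypothetical stand-ins for what the model returns for the prompt "say hello":
```rust
#[tokio::main]
async fn main() -> Result<(), reqwest::Error> {
    // A hypothetical tool call: endpoint name + JSON-encoded arguments.
    let (path, args) = ("hello_world", r#"{"say_whut":{"say":true}}"#);
    let body = reqwest::Client::new()
        .post(format!("http://127.0.0.1:3000/api/{path}"))
        .header("content-type", "application/json")
        .body(args)
        .send()
        .await?
        .text()
        .await?;
    println!("{body}"); // expected: "hello world"
    Ok(())
}
```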
## Thoughts, Feedback, Criticism, Comments?
Send me any of the above! I'm @sjud on the Leptos Discord, and I'm always looking to improve these projects and make them more helpful for the community, so please let me know how I can do that. Thanks!

end2end/package-lock.json

@@ -0,0 +1,74 @@
{
"name": "end2end",
"version": "1.0.0",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "end2end",
"version": "1.0.0",
"license": "ISC",
"devDependencies": {
"@playwright/test": "^1.28.0"
}
},
"node_modules/@playwright/test": {
"version": "1.28.0",
"resolved": "https://registry.npmjs.org/@playwright/test/-/test-1.28.0.tgz",
"integrity": "sha512-vrHs5DFTPwYox5SGKq/7TDn/S4q6RA1zArd7uhO6EyP9hj3XgZBBM12ktMbnDQNxh/fL1IUKsTNLxihmsU38lQ==",
"dev": true,
"dependencies": {
"@types/node": "*",
"playwright-core": "1.28.0"
},
"bin": {
"playwright": "cli.js"
},
"engines": {
"node": ">=14"
}
},
"node_modules/@types/node": {
"version": "18.11.9",
"resolved": "https://registry.npmjs.org/@types/node/-/node-18.11.9.tgz",
"integrity": "sha512-CRpX21/kGdzjOpFsZSkcrXMGIBWMGNIHXXBVFSH+ggkftxg+XYP20TESbh+zFvFj3EQOl5byk0HTRn1IL6hbqg==",
"dev": true
},
"node_modules/playwright-core": {
"version": "1.28.0",
"resolved": "https://registry.npmjs.org/playwright-core/-/playwright-core-1.28.0.tgz",
"integrity": "sha512-nJLknd28kPBiCNTbqpu6Wmkrh63OEqJSFw9xOfL9qxfNwody7h6/L3O2dZoWQ6Oxcm0VOHjWmGiCUGkc0X3VZA==",
"dev": true,
"bin": {
"playwright": "cli.js"
},
"engines": {
"node": ">=14"
}
}
},
"dependencies": {
"@playwright/test": {
"version": "1.28.0",
"resolved": "https://registry.npmjs.org/@playwright/test/-/test-1.28.0.tgz",
"integrity": "sha512-vrHs5DFTPwYox5SGKq/7TDn/S4q6RA1zArd7uhO6EyP9hj3XgZBBM12ktMbnDQNxh/fL1IUKsTNLxihmsU38lQ==",
"dev": true,
"requires": {
"@types/node": "*",
"playwright-core": "1.28.0"
}
},
"@types/node": {
"version": "18.11.9",
"resolved": "https://registry.npmjs.org/@types/node/-/node-18.11.9.tgz",
"integrity": "sha512-CRpX21/kGdzjOpFsZSkcrXMGIBWMGNIHXXBVFSH+ggkftxg+XYP20TESbh+zFvFj3EQOl5byk0HTRn1IL6hbqg==",
"dev": true
},
"playwright-core": {
"version": "1.28.0",
"resolved": "https://registry.npmjs.org/playwright-core/-/playwright-core-1.28.0.tgz",
"integrity": "sha512-nJLknd28kPBiCNTbqpu6Wmkrh63OEqJSFw9xOfL9qxfNwody7h6/L3O2dZoWQ6Oxcm0VOHjWmGiCUGkc0X3VZA==",
"dev": true
}
}
}

end2end/package.json

@@ -0,0 +1,13 @@
{
"name": "end2end",
"version": "1.0.0",
"description": "",
"main": "index.js",
"scripts": {},
"keywords": [],
"author": "",
"license": "ISC",
"devDependencies": {
"@playwright/test": "^1.28.0"
}
}

end2end/playwright.config.ts

@@ -0,0 +1,107 @@
import type { PlaywrightTestConfig } from "@playwright/test";
import { devices } from "@playwright/test";
/**
* Read environment variables from file.
* https://github.com/motdotla/dotenv
*/
// require('dotenv').config();
/**
* See https://playwright.dev/docs/test-configuration.
*/
const config: PlaywrightTestConfig = {
testDir: "./tests",
/* Maximum time one test can run for. */
timeout: 30 * 1000,
expect: {
/**
* Maximum time expect() should wait for the condition to be met.
* For example in `await expect(locator).toHaveText();`
*/
timeout: 5000,
},
/* Run tests in files in parallel */
fullyParallel: true,
/* Fail the build on CI if you accidentally left test.only in the source code. */
forbidOnly: !!process.env.CI,
/* Retry on CI only */
retries: process.env.CI ? 2 : 0,
/* Opt out of parallel tests on CI. */
workers: process.env.CI ? 1 : undefined,
/* Reporter to use. See https://playwright.dev/docs/test-reporters */
reporter: "html",
/* Shared settings for all the projects below. See https://playwright.dev/docs/api/class-testoptions. */
use: {
/* Maximum time each action such as `click()` can take. Defaults to 0 (no limit). */
actionTimeout: 0,
/* Base URL to use in actions like `await page.goto('/')`. */
// baseURL: 'http://localhost:3000',
/* Collect trace when retrying the failed test. See https://playwright.dev/docs/trace-viewer */
trace: "on-first-retry",
},
/* Configure projects for major browsers */
projects: [
{
name: "chromium",
use: {
...devices["Desktop Chrome"],
},
},
{
name: "firefox",
use: {
...devices["Desktop Firefox"],
},
},
{
name: "webkit",
use: {
...devices["Desktop Safari"],
},
},
/* Test against mobile viewports. */
// {
// name: 'Mobile Chrome',
// use: {
// ...devices['Pixel 5'],
// },
// },
// {
// name: 'Mobile Safari',
// use: {
// ...devices['iPhone 12'],
// },
// },
/* Test against branded browsers. */
// {
// name: 'Microsoft Edge',
// use: {
// channel: 'msedge',
// },
// },
// {
// name: 'Google Chrome',
// use: {
// channel: 'chrome',
// },
// },
],
/* Folder for test artifacts such as screenshots, videos, traces, etc. */
// outputDir: 'test-results/',
/* Run your local dev server before starting the tests */
// webServer: {
// command: 'npm run start',
// port: 3000,
// },
};
export default config;

end2end/tests/example.spec.ts

@@ -0,0 +1,9 @@
import { test, expect } from "@playwright/test";
test("homepage has title and links to intro page", async ({ page }) => {
await page.goto("http://localhost:3000/");
await expect(page).toHaveTitle("Welcome to Leptos");
await expect(page.locator("h1")).toHaveText("Welcome to Leptos!");
});

public/favicon.ico

Binary file not shown (image, 15 KiB).

rust-toolchain.toml

@@ -0,0 +1,3 @@
[toolchain]
channel = "nightly"

src/app.rs

@@ -0,0 +1,174 @@
use crate::error_template::{AppError, ErrorTemplate};
use leptos::*;
use leptos_meta::*;
use leptos_router::*;
#[component]
pub fn App() -> impl IntoView {
// Provides context that manages stylesheets, titles, meta tags, etc.
provide_meta_context();
view! {
// injects a stylesheet into the document <head>
// id=leptos means cargo-leptos will hot-reload this stylesheet
<Stylesheet id="leptos" href="/pkg/openapi-swagger-ui.css"/>
// sets the document title
<Title text="Welcome to Leptos"/>
// content for this welcome page
<Router fallback=|| {
let mut outside_errors = Errors::default();
outside_errors.insert_with_default_key(AppError::NotFound);
view! {
<ErrorTemplate outside_errors/>
}
.into_view()
}>
<main>
<Routes>
<Route path="" view=HomePage/>
</Routes>
</main>
</Router>
}
}
/// Renders the home page of your application.
#[component]
fn HomePage() -> impl IntoView {
let hello = Action::<HelloWorld,_>::server();
view! {
<button on:click = move |_| hello.dispatch(HelloWorld{say_whut:SayHello{say:true}})>
"hello world"
</button>
<ErrorBoundary
fallback=|err| view! { <p>{format!("{err:#?}")}</p>}>
{
move || hello.value().get().map(|h|match h {
Ok(h) => h.into_view(),
err => err.into_view()
})
}
</ErrorBoundary>
<AiSayHello/>
}
}
#[cfg_attr(feature="ssr",derive(utoipa::ToSchema))]
#[derive(Debug,Copy,Clone,serde::Serialize,serde::Deserialize)]
pub struct SayHello {
say:bool,
}
// the following function comment is what our GPT will get
/// Call to say hello world, or call to not say hello world.
#[cfg_attr(feature="ssr",utoipa::path(
post,
path = "/api/hello_world",
responses(
(status = 200, description = "Hello world from server or maybe not?", body = String),
),
params(
("say_whut" = SayHello, description = "If true then say hello, if false then don't."),
)
))]
#[server(
// we need to encode our server function inputs as JSON because that's what OpenAI generates
input=server_fn::codec::Json,
endpoint="hello_world"
)]
pub async fn hello_world(say_whut:SayHello) -> Result<String,ServerFnError> {
if say_whut.say {
Ok("hello world".to_string())
} else {
Ok("not hello".to_string())
}
}
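// With the Json codec above, the endpoint also accepts a plain JSON request
// body, which is exactly what the AI flow below ends up sending, e.g.:
//   POST /api/hello_world
//   {"say_whut":{"say":true}}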
/// Takes a list of names
#[cfg_attr(feature="ssr",utoipa::path(
post,
path = "/api/name_list",
responses(
(status = 200, description = "The same list you got back", body = String),
),
params(
("list" = Vec<String>, description = "A list of names"),
)
))]
#[server(
input=server_fn::codec::Json,
endpoint="name_list"
)]
pub async fn name_list(list:Vec<String>) -> Result<Vec<String>,ServerFnError> {
Ok(list)
}
#[derive(Clone,Debug,PartialEq,serde::Serialize,serde::Deserialize)]
pub struct AiServerCall{
pub path:String,
pub args:String,
}
// Don't include our AI function in the OpenAPI
#[server]
pub async fn ai_msg(msg:String) -> Result<AiServerCall,ServerFnError> {
crate::open_ai::call_gpt_with_api(msg).await.get(0).cloned().ok_or(ServerFnError::new("No first message"))
}
#[component]
pub fn AiSayHello() -> impl IntoView {
let ai_msg = Action::<AiMsg, _>::server();
let result = create_rw_signal(Vec::new());
view!{
<ActionForm action=ai_msg>
<label> "Tell the AI what function to call."
<input name="msg"/>
</label>
<input type="submit"/>
</ActionForm>
<div>
{
move || if let Some(Ok(AiServerCall{path,args})) = ai_msg.value().get() {
spawn_local(async move {
let text =
reqwest::Client::new()
.post(format!("http://127.0.0.1:3000/api/{}",path))
.header("content-type","application/json")
.body(args)
.send()
.await
.unwrap()
.text()
.await
.unwrap();
result.update(|list|
list.push(
text
)
);
});
}
}
<For
each=move || result.get()
key=|_| uuid::Uuid::new_v4()
children=move |s:String| {
view! {
<p>{s}</p>
}
}
/>
</div>
}
}

src/error_template.rs

@@ -0,0 +1,72 @@
use http::status::StatusCode;
use leptos::*;
use thiserror::Error;
#[derive(Clone, Debug, Error)]
pub enum AppError {
#[error("Not Found")]
NotFound,
}
impl AppError {
pub fn status_code(&self) -> StatusCode {
match self {
AppError::NotFound => StatusCode::NOT_FOUND,
}
}
}
// A basic function to display errors served by the error boundaries.
// Feel free to do more complicated things here than just displaying the error.
#[component]
pub fn ErrorTemplate(
#[prop(optional)] outside_errors: Option<Errors>,
#[prop(optional)] errors: Option<RwSignal<Errors>>,
) -> impl IntoView {
let errors = match outside_errors {
Some(e) => create_rw_signal(e),
None => match errors {
Some(e) => e,
None => panic!("No Errors found and we expected errors!"),
},
};
// Get Errors from Signal
let errors = errors.get_untracked();
// Downcast lets us take a type that implements `std::error::Error`
let errors: Vec<AppError> = errors
.into_iter()
.filter_map(|(_k, v)| v.downcast_ref::<AppError>().cloned())
.collect();
println!("Errors: {errors:#?}");
// Only the response code for the first error is actually sent from the server
// this may be customized by the specific application
#[cfg(feature = "ssr")]
{
use leptos_axum::ResponseOptions;
let response = use_context::<ResponseOptions>();
if let Some(response) = response {
response.set_status(errors[0].status_code());
}
}
view! {
<h1>{if errors.len() > 1 {"Errors"} else {"Error"}}</h1>
<For
// a function that returns the items we're iterating over; a signal is fine
each= move || {errors.clone().into_iter().enumerate()}
// a unique key for each item as a reference
key=|(index, _error)| *index
// renders each item to a view
children=move |error| {
let error_string = error.1.to_string();
let error_code= error.1.status_code();
view! {
<h2>{error_code.to_string()}</h2>
<p>"Error: " {error_string}</p>
}
}
/>
}
}

src/fileserv.rs

@@ -0,0 +1,42 @@
use axum::{
body::Body,
extract::State,
response::IntoResponse,
http::{Request, Response, StatusCode, Uri},
};
use axum::response::Response as AxumResponse;
use tower::ServiceExt;
use tower_http::services::ServeDir;
use leptos::*;
use crate::app::App;
pub async fn file_and_error_handler(uri: Uri, State(options): State<LeptosOptions>, req: Request<Body>) -> AxumResponse {
let root = options.site_root.clone();
let res = get_static_file(uri.clone(), &root).await.unwrap();
if res.status() == StatusCode::OK {
res.into_response()
} else {
let handler = leptos_axum::render_app_to_stream(options.to_owned(), App);
handler(req).await.into_response()
}
}
async fn get_static_file(
uri: Uri,
root: &str,
) -> Result<Response<Body>, (StatusCode, String)> {
let req = Request::builder()
.uri(uri.clone())
.body(Body::empty())
.unwrap();
// `ServeDir` implements `tower::Service` so we can call it with `tower::ServiceExt::oneshot`
// This path is relative to the cargo root
match ServeDir::new(root).oneshot(req).await {
Ok(res) => Ok(res.into_response()),
Err(err) => Err((
StatusCode::INTERNAL_SERVER_ERROR,
format!("Something went wrong: {err}"),
)),
}
}

src/lib.rs

@@ -0,0 +1,27 @@
pub mod app;
pub mod error_template;
#[cfg(feature = "ssr")]
pub mod fileserv;
#[cfg(feature="ssr")]
pub mod open_ai;
#[cfg(feature = "hydrate")]
#[wasm_bindgen::prelude::wasm_bindgen]
pub fn hydrate() {
use crate::app::*;
console_error_panic_hook::set_once();
leptos::mount_to_body(App);
}
#[cfg(feature="ssr")]
pub mod api_doc {
use crate::app::__path_hello_world;
use crate::app::SayHello;
use crate::app::__path_name_list;
#[derive(utoipa::OpenApi)]
#[openapi(
info(description = "My Api description"),
paths(hello_world,name_list), components(schemas(SayHello)),
)]
pub struct ApiDoc;
}
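To eyeball the spec this derive produces, here is a small sketch (it assumes the `ssr` feature is enabled; `to_pretty_json` comes from `utoipa::openapi::OpenApi`):
```rust
// Print the same document that SwaggerUi serves at /api-docs/openapi.json.
#[cfg(feature = "ssr")]
pub fn dump_spec() {
    use utoipa::OpenApi;
    let json = crate::api_doc::ApiDoc::openapi()
        .to_pretty_json()
        .expect("spec should serialize to JSON");
    println!("{json}");
}
```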

src/main.rs

@@ -0,0 +1,42 @@
#[cfg(feature = "ssr")]
#[tokio::main]
async fn main() {
use axum::Router;
use leptos::*;
use leptos_axum::{generate_route_list, LeptosRoutes};
use openapi_swagger_ui::app::*;
use openapi_swagger_ui::api_doc::ApiDoc;
use openapi_swagger_ui::fileserv::file_and_error_handler;
use utoipa::OpenApi;
// Setting get_configuration(None) means we'll be using cargo-leptos's env values
// For deployment these variables are:
// <https://github.com/leptos-rs/start-axum#executing-a-server-on-a-remote-machine-without-the-toolchain>
// Alternately a file can be specified such as Some("Cargo.toml")
// The file would need to be included with the executable when moved to deployment
let conf = get_configuration(None).await.unwrap();
let leptos_options = conf.leptos_options;
let addr = leptos_options.site_addr;
let routes = generate_route_list(App);
// build our application with a route
let app = Router::new()
.leptos_routes(&leptos_options, routes, App)
.fallback(file_and_error_handler)
.merge(utoipa_swagger_ui::SwaggerUi::new("/swagger-ui")
.url("/api-docs/openapi.json", ApiDoc::openapi()))
.with_state(leptos_options);
let listener = tokio::net::TcpListener::bind(&addr).await.unwrap();
logging::log!("listening on http://{}", &addr);
axum::serve(listener, app.into_make_service())
.await
.unwrap();
}
#[cfg(not(feature = "ssr"))]
pub fn main() {
// no client-side main function
// unless we want this to work with e.g., Trunk for a purely client-side app
// see lib.rs for hydration function instead
}
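With the router above, Swagger UI is served at /swagger-ui and the raw spec at /api-docs/openapi.json. A hypothetical smoke test against a running server (again leaning on `reqwest`):
```rust
#[tokio::main]
async fn main() -> Result<(), reqwest::Error> {
    // Both documented server-fn paths should appear in the generated spec.
    let spec = reqwest::get("http://127.0.0.1:3000/api-docs/openapi.json")
        .await?
        .text()
        .await?;
    assert!(spec.contains("/api/hello_world"));
    assert!(spec.contains("/api/name_list"));
    Ok(())
}
```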

src/open_ai.rs

@@ -0,0 +1,267 @@
/*
Follows
https://cookbook.openai.com/examples/function_calling_with_an_openapi_spec
closely
*/
pub static SYSTEM_MESSAGE: &'static str = "
You are a helpful assistant.
Respond to the following prompt by using function_call and then summarize actions.
Ask for clarification if a user request is ambiguous.
";
use serde_json::Map;
use openai_dive::v1::api::Client;
use openai_dive::v1::models::Gpt4Engine;
use std::env;
use openai_dive::v1::resources::chat::{
ChatCompletionFunction, ChatCompletionParameters, ChatCompletionTool, ChatCompletionToolType, ChatMessage,
ChatMessageContent,Role,
};
use utoipa::openapi::schema::Array;
use serde_json::Value;
use utoipa::openapi::schema::SchemaType;
use utoipa::openapi::schema::Schema;
use utoipa::OpenApi;
use serde_json::json;
use utoipa::openapi::path::{PathItemType,Parameter};
use utoipa::openapi::Required;
use utoipa::openapi::schema::Object;
use utoipa::openapi::RefOr;
pub fn make_openapi_call_via_gpt(message:String) -> ChatCompletionParameters {
let docs = super::api_doc::ApiDoc::openapi();
let mut functions = vec![];
// get each path and its path item object
for (path,path_item) in docs.paths.paths.iter(){
// all our server functions are POST
let operation = path_item.operations.get(&PathItemType::Post).expect("Expect POST op");
// This name will be given to the OpenAI API as part of our functions
let name = operation.operation_id.clone().expect("Each operation to have an operation id");
// we'll use the description
let desc = operation.description.clone().expect("Each operation to have a description; this is how GPT knows what the function does, and it is helpful for calling it.");
let mut required_list = vec![];
let mut properties = serde_json::Map::new();
if let Some(params) = operation.parameters.clone() {
leptos::logging::log!("{params:#?}");
for Parameter{name,description,required,schema,..} in params.into_iter() {
if required == Required::True {
required_list.push(name.clone());
}
let description = description.unwrap_or_default();
if let Some(RefOr::Ref(utoipa::openapi::schema::Ref{ref_location,..})) = schema {
let schema_name = ref_location.split('/').last().expect("Expecting last after split");
let RefOr::T(schema) = docs.components
.as_ref()
.expect("components")
.schemas
.get(schema_name)
.cloned()
.expect("{schema_name} to be in components as a schema") else {panic!("expecting T")};
let mut output = Map::new();
parse_schema_into_openapi_property(name.clone(),schema,&mut output);
properties.insert(name,serde_json::Value::Object(output));
} else if let Some(RefOr::T(schema)) = schema {
let mut output = Map::new();
parse_schema_into_openapi_property(name.clone(),schema,&mut output);
properties.insert(name.clone(),serde_json::Value::Object(output));
}
}
}
let parameters = json!({
"type": "object",
"properties": properties,
"required": required_list,
});
leptos::logging::log!("{parameters}");
functions.push(
ChatCompletionFunction {
name,
description: Some(desc),
parameters,
}
)
}
ChatCompletionParameters {
model: Gpt4Engine::Gpt41106Preview.to_string(),
messages: vec![
ChatMessage {
role:Role::System,
content: ChatMessageContent::Text(SYSTEM_MESSAGE.to_string()),
..Default::default()
},
ChatMessage {
role:Role::User,
content: ChatMessageContent::Text(message),
..Default::default()
}],
tools: Some(functions.into_iter().map(|function|{
ChatCompletionTool {
r#type: ChatCompletionToolType::Function,
function,
}
}).collect::<Vec<ChatCompletionTool>>()),
..Default::default()
}
}
pub fn parse_schema_into_openapi_property(
name:String,
schema:Schema,
output: &mut serde_json::Map::<String,serde_json::Value>) {
let docs = super::api_doc::ApiDoc::openapi();
match schema {
Schema::Object(Object{
schema_type,
required,
properties,
..
}) => match schema_type{
SchemaType::Object => {
output.insert("type".to_string(),Value::String("object".to_string()));
output.insert("required".to_string(),Value::Array(required.into_iter()
.map(|s|Value::String(s))
.collect::<Vec<Value>>()));
output.insert("properties".to_string(),{
let mut map = Map::new();
for (key,val) in properties
.into_iter()
.map(|(key,val)|{
let RefOr::T(schema) = val else {panic!("expecting t")};
let mut output = Map::new();
parse_schema_into_openapi_property(name.clone(),schema,&mut output);
(key,output)
}) {
map.insert(key,Value::Object(val));
}
Value::Object(map)
});
},
SchemaType::Value => {
panic!("not expecting Value here.");
},
SchemaType::String => {
output.insert("type".to_string(),serde_json::Value::String("string".to_string()));
},
SchemaType::Integer => {
output.insert("type".to_string(),serde_json::Value::String("integer".to_string()));
},
SchemaType::Number => {
output.insert("type".to_string(),serde_json::Value::String("number".to_string()));
},
SchemaType::Boolean => {
output.insert("type".to_string(),serde_json::Value::String("boolean".to_string()));
},
SchemaType::Array => {
output.insert("type".to_string(),serde_json::Value::String("array".to_string()));
},
},
Schema::Array(Array{schema_type,items,..}) => {
match schema_type {
SchemaType::Array => {
let mut map = Map::new();
if let RefOr::Ref(utoipa::openapi::schema::Ref{ref_location,..}) = *items {
let schema_name = ref_location.split('/').last().expect("Expecting last after split");
let RefOr::T(schema) = docs.components
.as_ref()
.expect("components")
.schemas
.get(schema_name)
.cloned()
.expect("{schema_name} to be in components as a schema") else {panic!("expecting T")};
let mut map = Map::new();
parse_schema_into_openapi_property(name.clone(),schema,&mut map);
output.insert(name.clone(),serde_json::Value::Object(map));
} else if let RefOr::T(schema) = *items {
let mut map = Map::new();
parse_schema_into_openapi_property(name.clone(),schema,&mut map);
output.insert(name,serde_json::Value::Object(map));
}
},
_ => panic!("if schema is an array, then I'm expecting schema type to be an array ")
}
}
_ => panic!("I don't know how to handle this yet.")
}
}
use crate::app::AiServerCall;
pub async fn call_gpt_with_api(message:String) -> Vec<AiServerCall> {
let api_key = std::env::var("OPENAI_API_KEY").expect("$OPENAI_API_KEY is not set");
let client = Client::new(api_key);
let completion_parameters = make_openapi_call_via_gpt(message);
let result = client.chat().create(completion_parameters).await.unwrap();
let message = result.choices[0].message.clone();
let mut res = vec![];
if let Some(tool_calls) = message.clone().tool_calls {
for tool_call in tool_calls {
let name = tool_call.function.name;
let arguments = tool_call.function.arguments;
res.push(AiServerCall{
path:name,
args:arguments,
});
}
}
res
}
/*
def openapi_to_functions(openapi_spec):
functions = []
for path, methods in openapi_spec["paths"].items():
for method, spec_with_ref in methods.items():
# 1. Resolve JSON references.
spec = jsonref.replace_refs(spec_with_ref)
# 2. Extract a name for the functions.
function_name = spec.get("operationId")
# 3. Extract a description and parameters.
desc = spec.get("description") or spec.get("summary", "")
schema = {"type": "object", "properties": {}}
req_body = (
spec.get("requestBody", {})
.get("content", {})
.get("application/json", {})
.get("schema")
)
if req_body:
schema["properties"]["requestBody"] = req_body
params = spec.get("parameters", [])
if params:
param_properties = {
param["name"]: param["schema"]
for param in params
if "schema" in param
}
schema["properties"]["parameters"] = {
"type": "object",
"properties": param_properties,
}
functions.append(
{"type": "function", "function": {"name": function_name, "description": desc, "parameters": schema}}
)
return functions */
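Tracing `make_openapi_call_via_gpt` and `parse_schema_into_openapi_property` for the `hello_world` operation, the `parameters` value handed to OpenAI should come out roughly like this sketch (shape inferred from the code above, not captured output):
```rust
fn main() {
    // SayHello is resolved from the spec's components and recursed into,
    // yielding a nested object schema for the required `say_whut` parameter.
    let parameters = serde_json::json!({
        "type": "object",
        "properties": {
            "say_whut": {
                "type": "object",
                "required": ["say"],
                "properties": { "say": { "type": "boolean" } }
            }
        },
        "required": ["say_whut"]
    });
    println!("{parameters:#}");
}
```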

style/main.scss

@@ -0,0 +1,4 @@
body {
font-family: sans-serif;
text-align: center;
}