diff --git a/.gitignore b/.gitignore index 54dfe61..2d7a42e 100644 --- a/.gitignore +++ b/.gitignore @@ -56,3 +56,4 @@ Cargo.lock backend/target/ +.env diff --git a/backend/.gitignore b/backend/.gitignore new file mode 100644 index 0000000..4c49bd7 --- /dev/null +++ b/backend/.gitignore @@ -0,0 +1 @@ +.env diff --git a/backend/Cargo.toml b/backend/Cargo.toml index 8589859..7e278a8 100644 --- a/backend/Cargo.toml +++ b/backend/Cargo.toml @@ -6,10 +6,14 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -tokio = { version= "1", features = ["full"] } +openai_api_rust = "0.1.8" +diesel = { version = "2.1.0", features = ["postgres"] } +dotenvy = "0.15" +uuid = { version = "1.7.0", features = ["v5", "fast-rng", "macro-diagnostics"] } +tokio = { version = "1", features = ["full"] } tokio-stream = "0.1.6" +async-trait = "0.1.77" warp = "0.3" serde = { version = "1.0", features = ["derive"]} serde_json = "1.0" futures = { version = "0.3", default-features=false} -uuid = { version = "1.1.2", features = ["v4", "fast-rng", "macro-diagnostics"]} diff --git a/backend/README.md b/backend/README.md new file mode 100644 index 0000000..da43389 --- /dev/null +++ b/backend/README.md @@ -0,0 +1,6 @@ +# Setup +1. Install Rust +2. cargo build +3. echo DATABASE_URL=postgres://username:password@localhost/diesel_demo > .env +4. diesel setup +5. 
cargo run diff --git a/backend/src/cache.rs b/backend/src/cache.rs new file mode 100644 index 0000000..ca27e21 --- /dev/null +++ b/backend/src/cache.rs @@ -0,0 +1,22 @@ +use crate::kube::{KubeId, Kube}; +use std::vec::Vec; + +pub struct Recipe { + items: Vec<KubeId>, +} + +impl Recipe { + pub fn new(items: Vec<KubeId>) -> Self { + let mut items = items; + items.sort(); + Recipe { items } + } + pub fn hash(&self) -> u64 { + let big_key = self.items.iter().fold(0, |acc, x| acc ^ x.as_u128()); + (big_key >> 64 & big_key) as u64 + } +} + +pub struct PsqlCache { + +} diff --git a/backend/src/cache_test.rs b/backend/src/cache_test.rs new file mode 100644 index 0000000..19ae28e --- /dev/null +++ b/backend/src/cache_test.rs @@ -0,0 +1,20 @@ +#[cfg(test)] +mod tests { + // Note this useful idiom: importing names from outer (for mod tests) scope. + use super::*; + + #[test] + + fn test_kube_cache_hash_is_not_order_sensitive() { + let mut items = vec![]; + items.push(KubeId::new("a")); + items.push(KubeId::new("b")); + items.push(KubeId::new("b")); + items.push(KubeId::new("c")); + let recipe1 = Recipe::new(items.clone()); + + items.reverse(); + let recipe2 = Recipe::new(items.clone()); + assert_eq!(recipe1.hash(), recipe2.hash()); + } +} diff --git a/backend/src/kube.rs b/backend/src/kube.rs index 33e690b..5c82bcd 100644 --- a/backend/src/kube.rs +++ b/backend/src/kube.rs @@ -1,7 +1,36 @@ -#[derive(Debug, PartialEq)] +use uuid::Uuid; + +#[derive(PartialEq, Eq, PartialOrd, Ord, Debug)] +pub struct KubeId { + uuid: Uuid, +} + +impl KubeId { + pub fn new(name: &str) -> Self { + let mut name = name.to_string(); + name.push_str("kube"); + KubeId { + uuid: Uuid::new_v5(&Uuid::NAMESPACE_DNS, name.as_bytes()), + } + } + + pub fn as_u128(&self) -> u128 { + self.uuid.as_u128() + } +} + +#[derive(PartialEq, Debug)] pub struct Kube { - name: String, - uuid: String, //uuid type? 
+ pub id: KubeId, + pub name: String, +} +impl Kube { + pub fn new(name: String) -> Kube { + Kube { + id: KubeId::new(name.as_str()), + name, + } + } } // we should have a placeholder ''loading'' cube we can send over if api is slow diff --git a/backend/src/lib.rs b/backend/src/lib.rs index 824e2f3..e76e302 100644 --- a/backend/src/lib.rs +++ b/backend/src/lib.rs @@ -2,5 +2,7 @@ pub mod grid; pub mod space; pub mod kube; pub mod player; +pub mod llm; +pub mod cache; type Coordinate = [u64; 2]; diff --git a/backend/src/llm.rs b/backend/src/llm.rs new file mode 100644 index 0000000..c94aecb --- /dev/null +++ b/backend/src/llm.rs @@ -0,0 +1,52 @@ +use crate::kube::Kube; +use async_trait::async_trait; + +/// Trait for interacting with an LLM. +#[async_trait] +pub trait LLM { + /// Send a query to the LLM and get a [`std::string::String`] response. + async fn query(input: &str) -> String; + /// Ask the LLM to combine the given Kubes and return a new Kube. + async fn combine(&self, kubes: &[Kube]) -> Kube; +} + +/// A fake LLM that functions very basically, not processing the input in any meaningful way. This is most useful for testing functionality of other features which use LLMs. 
+pub struct FakeLLM { +} +impl FakeLLM { + fn new() -> FakeLLM { + FakeLLM { } + } +} + +#[async_trait] +impl LLM for FakeLLM { + async fn query(input: &str) -> String { + format!("This is a response to: {input}") + } + async fn combine(&self, kubes: &[Kube]) -> Kube { + let mut new_string = String::new(); + for kube in kubes { + new_string.push_str(kube.name.as_str()); + } + Kube::new(new_string) + } +} + +#[cfg(test)] +mod tests { + use super::*; + #[tokio::test] + async fn fake_combine_test() { + let kubes = vec![ + Kube::new(String::from("water")), + Kube::new(String::from("glass")), + ]; + let fake_llm = FakeLLM::new(); + let response_kube = fake_llm.combine(&kubes).await; + assert_eq!( + String::from("waterglass"), + response_kube.name, + ); + } +} diff --git a/backend/src/schema.rs b/backend/src/schema.rs new file mode 100644 index 0000000..f1dc5de --- /dev/null +++ b/backend/src/schema.rs @@ -0,0 +1,33 @@ +// @generated automatically by Diesel CLI. + +diesel::table! { + kube_recipe_lines (recipe_id) { + recipe_id -> Uuid, + input_id -> Uuid, + } +} + +diesel::table! { + kube_recipes (id) { + id -> Uuid, + output_id -> Uuid, + } +} + +diesel::table! 
{ + kubes (id) { + id -> Uuid, + #[max_length = 255] + name -> Varchar, + } +} + +diesel::joinable!(kube_recipe_lines -> kube_recipes (recipe_id)); +diesel::joinable!(kube_recipe_lines -> kubes (input_id)); +diesel::joinable!(kube_recipes -> kubes (output_id)); + +diesel::allow_tables_to_appear_in_same_query!( + kube_recipe_lines, + kube_recipes, + kubes, +); diff --git a/diesel.toml b/diesel.toml new file mode 100644 index 0000000..be61e3e --- /dev/null +++ b/diesel.toml @@ -0,0 +1,9 @@ +# For documentation on how to configure this file, +# see https://diesel.rs/guides/configuring-diesel-cli + +[print_schema] +file = "backend/src/schema.rs" +custom_type_derives = ["diesel::query_builder::QueryId"] + +[migrations_directory] +dir = "migrations" diff --git a/migrations/.keep b/migrations/.keep new file mode 100644 index 0000000..e69de29 diff --git a/migrations/00000000000000_diesel_initial_setup/down.sql b/migrations/00000000000000_diesel_initial_setup/down.sql new file mode 100644 index 0000000..a9f5260 --- /dev/null +++ b/migrations/00000000000000_diesel_initial_setup/down.sql @@ -0,0 +1,6 @@ +-- This file was automatically created by Diesel to setup helper functions +-- and other internal bookkeeping. This file is safe to edit, any future +-- changes will be added to existing projects as new migrations. + +DROP FUNCTION IF EXISTS diesel_manage_updated_at(_tbl regclass); +DROP FUNCTION IF EXISTS diesel_set_updated_at(); diff --git a/migrations/00000000000000_diesel_initial_setup/up.sql b/migrations/00000000000000_diesel_initial_setup/up.sql new file mode 100644 index 0000000..d68895b --- /dev/null +++ b/migrations/00000000000000_diesel_initial_setup/up.sql @@ -0,0 +1,36 @@ +-- This file was automatically created by Diesel to setup helper functions +-- and other internal bookkeeping. This file is safe to edit, any future +-- changes will be added to existing projects as new migrations. 
+ + + + +-- Sets up a trigger for the given table to automatically set a column called +-- `updated_at` whenever the row is modified (unless `updated_at` was included +-- in the modified columns) +-- +-- # Example +-- +-- ```sql +-- CREATE TABLE users (id SERIAL PRIMARY KEY, updated_at TIMESTAMP NOT NULL DEFAULT NOW()); +-- +-- SELECT diesel_manage_updated_at('users'); +-- ``` +CREATE OR REPLACE FUNCTION diesel_manage_updated_at(_tbl regclass) RETURNS VOID AS $$ +BEGIN + EXECUTE format('CREATE TRIGGER set_updated_at BEFORE UPDATE ON %s + FOR EACH ROW EXECUTE PROCEDURE diesel_set_updated_at()', _tbl); +END; +$$ LANGUAGE plpgsql; + +CREATE OR REPLACE FUNCTION diesel_set_updated_at() RETURNS trigger AS $$ +BEGIN + IF ( + NEW IS DISTINCT FROM OLD AND + NEW.updated_at IS NOT DISTINCT FROM OLD.updated_at + ) THEN + NEW.updated_at := current_timestamp; + END IF; + RETURN NEW; +END; +$$ LANGUAGE plpgsql; diff --git a/migrations/2024-03-02-174331_create_init_db/down.sql b/migrations/2024-03-02-174331_create_init_db/down.sql new file mode 100644 index 0000000..2488ed7 --- /dev/null +++ b/migrations/2024-03-02-174331_create_init_db/down.sql @@ -0,0 +1,5 @@ +-- This file should undo anything in `up.sql` + +DROP TABLE kube_recipe_lines; +DROP TABLE kube_recipes; +DROP TABLE kubes; diff --git a/migrations/2024-03-02-174331_create_init_db/up.sql b/migrations/2024-03-02-174331_create_init_db/up.sql new file mode 100644 index 0000000..fa9479a --- /dev/null +++ b/migrations/2024-03-02-174331_create_init_db/up.sql @@ -0,0 +1,20 @@ +-- Your SQL goes here + +CREATE TABLE kubes ( + id uuid primary key, + name varchar(255) not null +); + +CREATE INDEX k_i on kubes(name); + +CREATE TABLE kube_recipes ( + id uuid primary key, + output_id uuid not null references kubes(id) +); + +CREATE TABLE kube_recipe_lines ( + recipe_id uuid primary key references kube_recipes(id), + input_id uuid not null references kubes(id) +); + +CREATE INDEX krl_i ON kube_recipe_lines(recipe_id, input_id);