This repository has been archived by the owner on Feb 15, 2025. It is now read-only.

Change protobuf structure, adjust API and LLMs to match #93

Merged 29 commits on Jun 27, 2023
3 changes: 2 additions & 1 deletion .gitignore
Expand Up @@ -3,4 +3,5 @@ data/*
__pycache__
.venv
.ipynb_checkpoints
.vscode/
.vscode/
venv
18 changes: 18 additions & 0 deletions Dockerfile
@@ -0,0 +1,18 @@
FROM cgr.dev/chainguard/go:1.20 as build

WORKDIR /work

ADD go.mod .
ADD go.sum .
ADD api api
ADD pkg pkg
ADD api/models.toml .
RUN GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -ldflags '-extldflags "-static"' -o app api/main.go


FROM cgr.dev/chainguard/static:latest
COPY --from=build /work/app /app
COPY api/models.toml .

EXPOSE 8080
CMD ["/app"]
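
The API now builds from this root-level, multi-stage Dockerfile: the Chainguard Go 1.20 image compiles a static binary from api/main.go, and a minimal static base image runs it alongside api/models.toml. A rough sketch of building and running the image locally, assuming the build is run from the repository root (the tag mirrors the Makefile's TAG default and the port mapping mirrors the EXPOSE line; neither is prescribed by the PR):

docker build -t ghcr.io/defenseunicorns/leapfrogai/api:0.2.0 .
docker run --rm -p 8080:8080 ghcr.io/defenseunicorns/leapfrogai/api:0.2.0
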
70 changes: 58 additions & 12 deletions Makefile
@@ -1,45 +1,91 @@
build: api embeddings

TAG ?= 0.1.2 # want to keep things all aligned here
TAG ?= 0.2.0
# want to keep things all aligned here

.PHONY: api embeddings push

build: api stablelm stablelm-7b embeddings whisper

push:
docker push ghcr.io/defenseunicorns/leapfrogai/api:${TAG}

docker push ghcr.io/defenseunicorns/leapfrogai/stablelm-3b:${TAG}
docker push ghcr.io/defenseunicorns/leapfrogai/embeddings:${TAG}

base:
docker build --network=host -t ghcr.io/defenseunicorns/leapfrogai/base:${TAG} -f leapfrog/Dockerfile leapfrog

api:
cd api && \
docker build --network=host -t ghcr.io/defenseunicorns/leapfrogai/api:${TAG} .
api-push:
docker push ghcr.io/defenseunicorns/leapfrogai/api:${TAG}

stablelm:
cd llms/stablelm && \
cd models/llms/stablelm && \
docker build --network=host -t ghcr.io/defenseunicorns/leapfrogai/stablelm-3b:${TAG} .

stablelm-7b:
cd llms/stablelm-7b && \
docker build --network=host -t ghcr.io/defenseunicorns/leapfrogai/stablelm-7b:${TAG} .


embeddings:
cd embeddings && \
cd models/text2vec/all-minilm-l6-v2/ && \
docker build --network=host -t ghcr.io/defenseunicorns/leapfrogai/embeddings:${TAG} .

whisper:
cd models/whisper && \
cd models/speech2text/whisper && \
docker build --network=host -t ghcr.io/defenseunicorns/leapfrogai/whisper:${TAG} .

whisper-push:
docker push ghcr.io/defenseunicorns/leapfrogai/whisper:${TAG}

repeater:
cd models/test/repeater && \
docker build --network=host -t ghcr.io/defenseunicorns/leapfrogai/repeater:${TAG} .

repeater-push:
docker push ghcr.io/defenseunicorns/leapfrogai/repeater:${TAG}

# This thing is massive, so directly pushing to the zarf registry is quicker/easier
zarf-push-api:
docker tag ghcr.io/defenseunicorns/leapfrogai/api:0.0.1 localhost:5001/defenseunicorns/leapfrogai/api:0.0.1-zarf-1702594131
docker push localhost:5001/defenseunicorns/leapfrogai/api:0.0.1-zarf-1702594131


zarf-port-forward:
kubectl port-forward -n zarf svc/zarf-docker-registry 5001:5000
kubectl port-forward -n zarf svc/zarf-docker-registry 5001:5000


gen: gen-go gen-python


gen-python:
python3 -m grpc_tools.protoc --proto_path=proto/ generate/generate.proto --python_out=leapfrog --pyi_out=leapfrog --grpc_python_out=leapfrog
python3 -m grpc_tools.protoc --proto_path=proto audio/audio.proto --python_out=leapfrog --pyi_out=leapfrog --grpc_python_out=leapfrog
python3 -m grpc_tools.protoc --proto_path=proto embeddings/embeddings.proto --python_out=leapfrog --pyi_out=leapfrog --grpc_python_out=leapfrog
python3 -m grpc_tools.protoc --proto_path=proto name/name.proto --python_out=leapfrog --pyi_out=leapfrog --grpc_python_out=leapfrog


gen-go:
rm -rf pkg/client
mkdir -p pkg/client
protoc --go_out=pkg/client --go_opt=paths=source_relative --go-grpc_out=pkg/client --go-grpc_opt=paths=source_relative --proto_path=proto/ generate/generate.proto
protoc --go_out=pkg/client --go_opt=paths=source_relative --go-grpc_out=pkg/client --go-grpc_opt=paths=source_relative --proto_path=proto/ audio/audio.proto
protoc --go_out=pkg/client --go_opt=paths=source_relative --go-grpc_out=pkg/client --go-grpc_opt=paths=source_relative --proto_path=proto/ name/name.proto
protoc --go_out=pkg/client --go_opt=paths=source_relative --go-grpc_out=pkg/client --go-grpc_opt=paths=source_relative --proto_path=proto/ embeddings/embeddings.proto


update-embeddings: embeddings
docker tag ghcr.io/defenseunicorns/leapfrogai/embeddings:${TAG} localhost:5001/defenseunicorns/leapfrogai/embeddings:${TAG}-zarf-230844594
docker push localhost:5001/defenseunicorns/leapfrogai/embeddings:${TAG}-zarf-230844594
kubectl delete pods -n leapfrogai -l app=embeddings
update-api: api
docker tag ghcr.io/defenseunicorns/leapfrogai/api:${TAG} localhost:5001/defenseunicorns/leapfrogai/api:${TAG}-zarf-1702594131
docker push localhost:5001/defenseunicorns/leapfrogai/api:${TAG}-zarf-1702594131
kubectl delete pods -n leapfrogai -l app=api

update-repeater: repeater
docker tag ghcr.io/defenseunicorns/leapfrogai/repeater:${TAG} localhost:5001/defenseunicorns/leapfrogai/repeater:${TAG}-zarf-4122428005
docker push localhost:5001/defenseunicorns/leapfrogai/repeater:${TAG}-zarf-4122428005
kubectl delete pods -n leapfrogai -l app=repeater

update-stablelm: stablelm
docker tag ghcr.io/defenseunicorns/leapfrogai/stablelm-3b:${TAG} localhost:5001/defenseunicorns/leapfrogai/stablelm-3b:${TAG}-zarf-1442747400
docker push localhost:5001/defenseunicorns/leapfrogai/stablelm-3b:${TAG}-zarf-1442747400
kubectl delete pods -n leapfrogai -l app=stablelm
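
The new update-* targets cover the inner loop against a Zarf-managed cluster: rebuild an image, retag and push it to the registry exposed on localhost:5001, then delete the matching pods so they come back up on the new image. A rough sketch of that loop for the API, assuming zarf-port-forward is left running in a separate terminal and the pods are owned by a controller that recreates them:

make zarf-port-forward   # terminal 1: expose the in-cluster registry on localhost:5001
make update-api          # terminal 2: rebuild, retag, push, and bounce the api pods
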
22 changes: 0 additions & 22 deletions api/Dockerfile

This file was deleted.

31 changes: 31 additions & 0 deletions api/config/config.go
@@ -0,0 +1,31 @@
package config

import (
"github.com/BurntSushi/toml"
)

type Config map[string]Model

type Model struct {
Metadata Metadata `toml:"metadata"`
Network Network `toml:"network"`
}

type Metadata struct {
OwnedBy string `toml:"owned_by"`
Permission []string `toml:"permission"`
Description string `toml:"description"`
Tasks []string `toml:"tasks"`
}

type Network struct {
URL string `toml:"url"`
Type string `toml:"type"`
}

func LoadConfig(path string) (Config, error) {
var config Config
_, err := toml.DecodeFile(path, &config)

return config, err
}
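
The new config package maps a TOML file (api/models.toml, which the Dockerfile copies next to the binary) onto the Config map of model entries above. A minimal consumer sketch, assuming the module path github.com/defenseunicorns/leapfrogai (the PR does not show go.mod, so that import path and the print loop are illustrative only):

package main

import (
	"fmt"
	"log"

	// Assumed module path; not shown in this PR.
	"github.com/defenseunicorns/leapfrogai/api/config"
)

func main() {
	// The Dockerfile copies api/models.toml into the working directory as models.toml.
	cfg, err := config.LoadConfig("models.toml")
	if err != nil {
		log.Fatalf("loading model config: %v", err)
	}

	// Config is a map of model name -> metadata plus the backend's network address and type.
	for name, m := range cfg {
		fmt.Printf("%s -> %s (%s): %s\n", name, m.Network.URL, m.Network.Type, m.Metadata.Description)
	}
}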