Skip to content

Commit

Permalink
Merge remote-tracking branch 'upstream/concedo'
Browse files Browse the repository at this point in the history
  • Loading branch information
YellowRoseCx committed Feb 18, 2024
2 parents ae6ece1 + 1e460bb commit b9860f7
Show file tree
Hide file tree
Showing 155 changed files with 6,546 additions and 8,414 deletions.
32 changes: 0 additions & 32 deletions .devops/server-cuda.Dockerfile

This file was deleted.

45 changes: 0 additions & 45 deletions .devops/server-rocm.Dockerfile

This file was deleted.

20 changes: 0 additions & 20 deletions .devops/server.Dockerfile

This file was deleted.

62 changes: 0 additions & 62 deletions .github/workflows/nix-ci-aarch64.yml

This file was deleted.

3 changes: 0 additions & 3 deletions .gitmodules
Original file line number Diff line number Diff line change
@@ -1,3 +0,0 @@
[submodule "kompute"]
path = kompute
url = https://github.com/nomic-ai/kompute.git
17 changes: 0 additions & 17 deletions CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -27,8 +27,6 @@ set(LLAMA_SANITIZE_THREAD OFF)
set(LLAMA_SANITIZE_ADDRESS OFF)
set(LLAMA_SANITIZE_UNDEFINED OFF)

option(MAKE_MISC_FILES "MAKE_MISC_FILES" OFF)

# instruction set specific
option(LLAMA_AVX "llama: enable AVX" ON)
option(LLAMA_AVX2 "llama: enable AVX2" ON)
Expand Down Expand Up @@ -467,18 +465,3 @@ if (LLAMA_HIPBLAS)
target_compile_features(${TARGET} PRIVATE cxx_std_11)
endif()


if (MAKE_MISC_FILES)
add_subdirectory(common)
add_library(llama
llama.cpp
llama.h
)
target_include_directories(llama PUBLIC .)
target_compile_features(llama PUBLIC cxx_std_11) # don't bump
target_link_libraries(llama PRIVATE
ggml
${LLAMA_EXTRA_LIBS}
)
add_subdirectory(examples)
endif()
2 changes: 1 addition & 1 deletion Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -143,7 +143,7 @@ endif
# it is recommended to use the CMAKE file to build for cublas if you can - will likely work better
ifdef LLAMA_CUBLAS
CUBLAS_FLAGS = -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/opt/cuda/include -I$(CUDA_PATH)/targets/x86_64-linux/include
CUBLASLD_FLAGS = -lcuda -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/opt/cuda/lib64 -L$(CUDA_PATH)/targets/x86_64-linux/lib -L/usr/local/cuda/targets/aarch64-linux/lib -L/usr/lib/wsl/lib
CUBLASLD_FLAGS = -lcuda -lcublas -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/opt/cuda/lib64 -L$(CUDA_PATH)/targets/x86_64-linux/lib -L/usr/local/cuda/targets/aarch64-linux/lib -L/usr/lib/wsl/lib
CUBLAS_OBJS = ggml-cuda.o ggml_v3-cuda.o ggml_v2-cuda.o ggml_v2-cuda-legacy.o
NVCC = nvcc
NVCCFLAGS = --forward-unknown-to-host-compiler -use_fast_math
Expand Down
116 changes: 0 additions & 116 deletions awq-py/README.md

This file was deleted.

Loading

0 comments on commit b9860f7

Please sign in to comment.