./LLM/llama.cpp/llama-speculative \
    -m /home/ultimis/LLM/Models/mradermacher/Qwen2.5-72B-Instruct-i1-GGUF/Qwen2.5-72B-Instruct.i1-Q4_K_M.gguf \
    -md /home/ultimis/LLM/Models/bartowski/Qwen2.5-0.5B-Instruct-GGUF/Qwen2.5-0.5B-Instruct-Q8_0.gguf \
    -p "// Quick-sort implementation in C (4 spaces indentation + detailed comments) and sample usage" \
    --split-mode row --flash-attn --sampling-seq kfypmt \
    -c 8192 --top_k 1 --draft 16 -ngl 88 -ngld 30 --temp 0

ggml_cuda_init: GGML_CUDA_FORCE_MMQ: no
ggml_cuda_init: GGML_CUDA_FORCE_CUBLAS: no
ggml_cuda_init: found 3 CUDA devices:
  Device 0: Tesla P40, compute capability 6.1, VMM: yes
  Device 1: Tesla P40, compute capability 6.1, VMM: yes
  Device 2: Tesla P40, compute capability 6.1, VMM: yes
build: 4031 (d5a409e5) with cc (Ubuntu 13.2.0-23ubuntu4) 13.2.0 for x86_64-linux-gnu
llama_load_model_from_file: using device CUDA0 (Tesla P40) - 24286 MiB free
llama_load_model_from_file: using device CUDA1 (Tesla P40) - 24290 MiB free
llama_load_model_from_file: using device CUDA2 (Tesla P40) - 24290 MiB free
llama_model_loader: loaded meta data with 46 key-value pairs and 963 tensors from /home/ultimis/LLM/Models/mradermacher/Qwen2.5-72B-Instruct-i1-GGUF/Qwen2.5-72B-Instruct.i1-Q4_K_M.gguf (version GGUF V3 (latest))
llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
llama_model_loader: - kv 0: general.architecture str = qwen2
llama_model_loader: - kv 1: general.type str = model
llama_model_loader: - kv 2: general.name str = Qwen2.5 72B Instruct
llama_model_loader: - kv 3: general.finetune str = Instruct
llama_model_loader: - kv 4: general.basename str = Qwen2.5
llama_model_loader: - kv 5: general.size_label str = 72B
llama_model_loader: - kv 6: general.license str = other
llama_model_loader: - kv 7: general.license.name str = qwen
llama_model_loader: - kv 8: general.license.link str = https://huggingface.co/Qwen/Qwen2.5-7...
llama_model_loader: - kv 9: general.base_model.count u32 = 1
llama_model_loader: - kv 10: general.base_model.0.name str = Qwen2.5 72B
llama_model_loader: - kv 11: general.base_model.0.organization str = Qwen
llama_model_loader: - kv 12: general.base_model.0.repo_url str = https://huggingface.co/Qwen/Qwen2.5-72B
llama_model_loader: - kv 13: general.tags arr[str,2] = ["chat", "text-generation"]
llama_model_loader: - kv 14: general.languages arr[str,1] = ["en"]
llama_model_loader: - kv 15: qwen2.block_count u32 = 80
llama_model_loader: - kv 16: qwen2.context_length u32 = 32768
llama_model_loader: - kv 17: qwen2.embedding_length u32 = 8192
llama_model_loader: - kv 18: qwen2.feed_forward_length u32 = 29568
llama_model_loader: - kv 19: qwen2.attention.head_count u32 = 64
llama_model_loader: - kv 20: qwen2.attention.head_count_kv u32 = 8
llama_model_loader: - kv 21: qwen2.rope.freq_base f32 = 1000000.000000
llama_model_loader: - kv 22: qwen2.attention.layer_norm_rms_epsilon f32 = 0.000001
llama_model_loader: - kv 23: general.file_type u32 = 15
llama_model_loader: - kv 24: tokenizer.ggml.model str = gpt2
llama_model_loader: - kv 25: tokenizer.ggml.pre str = qwen2
llama_model_loader: - kv 26: tokenizer.ggml.tokens arr[str,152064] = ["!", "\"", "#", "$", "%", "&", "'", ...
llama_model_loader: - kv 27: tokenizer.ggml.token_type arr[i32,152064] = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ...
llama_model_loader: - kv 28: tokenizer.ggml.merges arr[str,151387] = ["Ġ Ġ", "ĠĠ ĠĠ", "i n", "Ġ t",...
llama_model_loader: - kv 29: tokenizer.ggml.eos_token_id u32 = 151645
llama_model_loader: - kv 30: tokenizer.ggml.padding_token_id u32 = 151643
llama_model_loader: - kv 31: tokenizer.ggml.bos_token_id u32 = 151643
llama_model_loader: - kv 32: tokenizer.ggml.add_bos_token bool = false
llama_model_loader: - kv 33: tokenizer.chat_template str = {%- if tools %}\n    {{- '<|im_start|>...
llama_model_loader: - kv 34: general.quantization_version u32 = 2
llama_model_loader: - kv 35: general.url str = https://huggingface.co/mradermacher/Q...
llama_model_loader: - kv 36: mradermacher.quantize_version str = 2
llama_model_loader: - kv 37: mradermacher.quantized_by str = mradermacher
llama_model_loader: - kv 38: mradermacher.quantized_at str = 2024-09-20T17:26:50+02:00
llama_model_loader: - kv 39: mradermacher.quantized_on str = db3
llama_model_loader: - kv 40: general.source.url str = https://huggingface.co/Qwen/Qwen2.5-7...
llama_model_loader: - kv 41: mradermacher.convert_type str = hf
llama_model_loader: - kv 42: quantize.imatrix.file str = Qwen2.5-72B-Instruct-i1-GGUF/imatrix.dat
llama_model_loader: - kv 43: quantize.imatrix.dataset str = imatrix-training-full-3
llama_model_loader: - kv 44: quantize.imatrix.entries_count i32 = 560
llama_model_loader: - kv 45: quantize.imatrix.chunks_count i32 = 318
llama_model_loader: - type f32: 401 tensors
llama_model_loader: - type q5_0: 40 tensors
llama_model_loader: - type q8_0: 40 tensors
llama_model_loader: - type q4_K: 401 tensors
llama_model_loader: - type q5_K: 40 tensors
llama_model_loader: - type q6_K: 41 tensors
llm_load_vocab: special tokens cache size = 22
llm_load_vocab: token to piece cache size = 0.9310 MB
llm_load_print_meta: format = GGUF V3 (latest)
llm_load_print_meta: arch = qwen2
llm_load_print_meta: vocab type = BPE
llm_load_print_meta: n_vocab = 152064
llm_load_print_meta: n_merges = 151387
llm_load_print_meta: vocab_only = 0
llm_load_print_meta: n_ctx_train = 32768
llm_load_print_meta: n_embd = 8192
llm_load_print_meta: n_layer = 80
llm_load_print_meta: n_head = 64
llm_load_print_meta: n_head_kv = 8
llm_load_print_meta: n_rot = 128
llm_load_print_meta: n_swa = 0
llm_load_print_meta: n_embd_head_k = 128
llm_load_print_meta: n_embd_head_v = 128
llm_load_print_meta: n_gqa = 8
llm_load_print_meta: n_embd_k_gqa = 1024
llm_load_print_meta: n_embd_v_gqa = 1024
llm_load_print_meta: f_norm_eps = 0.0e+00
llm_load_print_meta: f_norm_rms_eps = 1.0e-06
llm_load_print_meta: f_clamp_kqv = 0.0e+00
llm_load_print_meta: f_max_alibi_bias = 0.0e+00
llm_load_print_meta: f_logit_scale = 0.0e+00
llm_load_print_meta: n_ff = 29568
llm_load_print_meta: n_expert = 0
llm_load_print_meta: n_expert_used = 0
llm_load_print_meta: causal attn = 1
llm_load_print_meta: pooling type = 0
llm_load_print_meta: rope type = 2
llm_load_print_meta: rope scaling = linear
llm_load_print_meta: freq_base_train = 1000000.0
llm_load_print_meta: freq_scale_train = 1
llm_load_print_meta: n_ctx_orig_yarn = 32768
llm_load_print_meta: rope_finetuned = unknown
llm_load_print_meta: ssm_d_conv = 0
llm_load_print_meta: ssm_d_inner = 0
llm_load_print_meta: ssm_d_state = 0
llm_load_print_meta: ssm_dt_rank = 0
llm_load_print_meta: ssm_dt_b_c_rms = 0
llm_load_print_meta: model type = 70B
llm_load_print_meta: model ftype = Q4_K - Medium
llm_load_print_meta: model params = 72.71 B
llm_load_print_meta: model size = 44.15 GiB (5.22 BPW)
llm_load_print_meta: general.name = Qwen2.5 72B Instruct
llm_load_print_meta: BOS token = 151643 '<|endoftext|>'
llm_load_print_meta: EOS token = 151645 '<|im_end|>'
llm_load_print_meta: EOT token = 151645 '<|im_end|>'
llm_load_print_meta: PAD token = 151643 '<|endoftext|>'
llm_load_print_meta: LF token = 148848 'ÄĬ'
llm_load_print_meta: FIM PRE token = 151659 '<|fim_prefix|>'
llm_load_print_meta: FIM SUF token = 151661 '<|fim_suffix|>'
llm_load_print_meta: FIM MID token = 151660 '<|fim_middle|>'
llm_load_print_meta: FIM PAD token = 151662 '<|fim_pad|>'
llm_load_print_meta: FIM REP token = 151663 '<|repo_name|>'
llm_load_print_meta: FIM SEP token = 151664 '<|file_sep|>'
llm_load_print_meta: EOG token = 151643 '<|endoftext|>'
llm_load_print_meta: EOG token = 151645 '<|im_end|>'
llm_load_print_meta: EOG token = 151662 '<|fim_pad|>'
llm_load_print_meta: EOG token = 151663 '<|repo_name|>'
llm_load_print_meta: EOG token = 151664 '<|file_sep|>'
llm_load_print_meta: max token length = 256
llm_load_tensors: offloading 80 repeating layers to GPU
llm_load_tensors: offloading output layer to GPU
llm_load_tensors: offloaded 81/81 layers to GPU
llm_load_tensors: CPU_Mapped model buffer size = 668.25 MiB
llm_load_tensors: CUDA0 model buffer size = 2.74 MiB
llm_load_tensors: CUDA1 model buffer size = 2.74 MiB
llm_load_tensors: CUDA2 model buffer size = 2.67 MiB
llm_load_tensors: CUDA0_Split model buffer size = 14833.89 MiB
llm_load_tensors: CUDA1_Split model buffer size = 14307.76 MiB
llm_load_tensors: CUDA2_Split model buffer size = 15395.42 MiB
..................................................................................................
llama_new_context_with_model: n_seq_max = 1
llama_new_context_with_model: n_ctx = 8192
llama_new_context_with_model: n_ctx_per_seq = 8192
llama_new_context_with_model: n_batch = 2048
llama_new_context_with_model: n_ubatch = 512
llama_new_context_with_model: flash_attn = 1
llama_new_context_with_model: freq_base = 1000000.0
llama_new_context_with_model: freq_scale = 1
llama_new_context_with_model: n_ctx_per_seq (8192) < n_ctx_train (32768) -- the full capacity of the model will not be utilized
llama_kv_cache_init: CUDA0 KV buffer size = 864.00 MiB
llama_kv_cache_init: CUDA1 KV buffer size = 864.00 MiB
llama_kv_cache_init: CUDA2 KV buffer size = 832.00 MiB
llama_new_context_with_model: KV self size = 2560.00 MiB, K (f16): 1280.00 MiB, V (f16): 1280.00 MiB
llama_new_context_with_model: CUDA_Host output buffer size = 0.58 MiB
llama_new_context_with_model: CUDA0 compute buffer size = 171.50 MiB
llama_new_context_with_model: CUDA1 compute buffer size = 171.50 MiB
llama_new_context_with_model: CUDA2 compute buffer size = 313.00 MiB
llama_new_context_with_model: CUDA_Host compute buffer size = 32.01 MiB
llama_new_context_with_model: graph nodes = 2487
llama_new_context_with_model: graph splits = 4
common_init_from_params: warming up the model with an empty run - please wait ... (--no-warmup to disable)
llama_load_model_from_file: using device CUDA0 (Tesla P40) - 8100 MiB free
llama_load_model_from_file: using device CUDA1 (Tesla P40) - 7704 MiB free
llama_load_model_from_file: using device CUDA2 (Tesla P40) - 7514 MiB free
llama_model_loader: loaded meta data with 38 key-value pairs and 290 tensors from /home/ultimis/LLM/Models/bartowski/Qwen2.5-0.5B-Instruct-GGUF/Qwen2.5-0.5B-Instruct-Q8_0.gguf (version GGUF V3 (latest))
llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
llama_model_loader: - kv 0: general.architecture str = qwen2
llama_model_loader: - kv 1: general.type str = model
llama_model_loader: - kv 2: general.name str = Qwen2.5 0.5B Instruct
llama_model_loader: - kv 3: general.finetune str = Instruct
llama_model_loader: - kv 4: general.basename str = Qwen2.5
llama_model_loader: - kv 5: general.size_label str = 0.5B
llama_model_loader: - kv 6: general.license str = apache-2.0
llama_model_loader: - kv 7: general.license.link str = https://huggingface.co/Qwen/Qwen2.5-0...
llama_model_loader: - kv 8: general.base_model.count u32 = 1
llama_model_loader: - kv 9: general.base_model.0.name str = Qwen2.5 0.5B
llama_model_loader: - kv 10: general.base_model.0.organization str = Qwen
llama_model_loader: - kv 11: general.base_model.0.repo_url str = https://huggingface.co/Qwen/Qwen2.5-0.5B
llama_model_loader: - kv 12: general.tags arr[str,2] = ["chat", "text-generation"]
llama_model_loader: - kv 13: general.languages arr[str,1] = ["en"]
llama_model_loader: - kv 14: qwen2.block_count u32 = 24
llama_model_loader: - kv 15: qwen2.context_length u32 = 32768
llama_model_loader: - kv 16: qwen2.embedding_length u32 = 896
llama_model_loader: - kv 17: qwen2.feed_forward_length u32 = 4864
llama_model_loader: - kv 18: qwen2.attention.head_count u32 = 14
llama_model_loader: - kv 19: qwen2.attention.head_count_kv u32 = 2
llama_model_loader: - kv 20: qwen2.rope.freq_base f32 = 1000000.000000
llama_model_loader: - kv 21: qwen2.attention.layer_norm_rms_epsilon f32 = 0.000001
llama_model_loader: - kv 22: general.file_type u32 = 7
llama_model_loader: - kv 23: tokenizer.ggml.model str = gpt2
llama_model_loader: - kv 24: tokenizer.ggml.pre str = qwen2
llama_model_loader: - kv 25: tokenizer.ggml.tokens arr[str,151936] = ["!", "\"", "#", "$", "%", "&", "'", ...
llama_model_loader: - kv 26: tokenizer.ggml.token_type arr[i32,151936] = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ...
llama_model_loader: - kv 27: tokenizer.ggml.merges arr[str,151387] = ["Ġ Ġ", "ĠĠ ĠĠ", "i n", "Ġ t",...
llama_model_loader: - kv 28: tokenizer.ggml.eos_token_id u32 = 151645
llama_model_loader: - kv 29: tokenizer.ggml.padding_token_id u32 = 151643
llama_model_loader: - kv 30: tokenizer.ggml.bos_token_id u32 = 151643
llama_model_loader: - kv 31: tokenizer.ggml.add_bos_token bool = false
llama_model_loader: - kv 32: tokenizer.chat_template str = {%- if tools %}\n    {{- '<|im_start|>...
llama_model_loader: - kv 33: general.quantization_version u32 = 2
llama_model_loader: - kv 34: quantize.imatrix.file str = /models_out/Qwen2.5-0.5B-Instruct-GGU...
llama_model_loader: - kv 35: quantize.imatrix.dataset str = /training_dir/calibration_datav3.txt
llama_model_loader: - kv 36: quantize.imatrix.entries_count i32 = 168
llama_model_loader: - kv 37: quantize.imatrix.chunks_count i32 = 128
llama_model_loader: - type f32: 121 tensors
llama_model_loader: - type q8_0: 169 tensors
llm_load_vocab: special tokens cache size = 22
llm_load_vocab: token to piece cache size = 0.9310 MB
llm_load_print_meta: format = GGUF V3 (latest)
llm_load_print_meta: arch = qwen2
llm_load_print_meta: vocab type = BPE
llm_load_print_meta: n_vocab = 151936
llm_load_print_meta: n_merges = 151387
llm_load_print_meta: vocab_only = 0
llm_load_print_meta: n_ctx_train = 32768
llm_load_print_meta: n_embd = 896
llm_load_print_meta: n_layer = 24
llm_load_print_meta: n_head = 14
llm_load_print_meta: n_head_kv = 2
llm_load_print_meta: n_rot = 64
llm_load_print_meta: n_swa = 0
llm_load_print_meta: n_embd_head_k = 64
llm_load_print_meta: n_embd_head_v = 64
llm_load_print_meta: n_gqa = 7
llm_load_print_meta: n_embd_k_gqa = 128
llm_load_print_meta: n_embd_v_gqa = 128
llm_load_print_meta: f_norm_eps = 0.0e+00
llm_load_print_meta: f_norm_rms_eps = 1.0e-06
llm_load_print_meta: f_clamp_kqv = 0.0e+00
llm_load_print_meta: f_max_alibi_bias = 0.0e+00
llm_load_print_meta: f_logit_scale = 0.0e+00
llm_load_print_meta: n_ff = 4864
llm_load_print_meta: n_expert = 0
llm_load_print_meta: n_expert_used = 0
llm_load_print_meta: causal attn = 1
llm_load_print_meta: pooling type = 0
llm_load_print_meta: rope type = 2
llm_load_print_meta: rope scaling = linear
llm_load_print_meta: freq_base_train = 1000000.0
llm_load_print_meta: freq_scale_train = 1
llm_load_print_meta: n_ctx_orig_yarn = 32768
llm_load_print_meta: rope_finetuned = unknown
llm_load_print_meta: ssm_d_conv = 0
llm_load_print_meta: ssm_d_inner = 0
llm_load_print_meta: ssm_d_state = 0
llm_load_print_meta: ssm_dt_rank = 0
llm_load_print_meta: ssm_dt_b_c_rms = 0
llm_load_print_meta: model type = 1B
llm_load_print_meta: model ftype = Q8_0
llm_load_print_meta: model params = 494.03 M
llm_load_print_meta: model size = 500.79 MiB (8.50 BPW)
llm_load_print_meta: general.name = Qwen2.5 0.5B Instruct
llm_load_print_meta: BOS token = 151643 '<|endoftext|>'
llm_load_print_meta: EOS token = 151645 '<|im_end|>'
llm_load_print_meta: EOT token = 151645 '<|im_end|>'
llm_load_print_meta: PAD token = 151643 '<|endoftext|>'
llm_load_print_meta: LF token = 148848 'ÄĬ'
llm_load_print_meta: FIM PRE token = 151659 '<|fim_prefix|>'
llm_load_print_meta: FIM SUF token = 151661 '<|fim_suffix|>'
llm_load_print_meta: FIM MID token = 151660 '<|fim_middle|>'
llm_load_print_meta: FIM PAD token = 151662 '<|fim_pad|>'
llm_load_print_meta: FIM REP token = 151663 '<|repo_name|>'
llm_load_print_meta: FIM SEP token = 151664 '<|file_sep|>'
llm_load_print_meta: EOG token = 151643 '<|endoftext|>'
llm_load_print_meta: EOG token = 151645 '<|im_end|>'
llm_load_print_meta: EOG token = 151662 '<|fim_pad|>'
llm_load_print_meta: EOG token = 151663 '<|repo_name|>'
llm_load_print_meta: EOG token = 151664 '<|file_sep|>'
llm_load_print_meta: max token length = 256
llm_load_tensors: offloading 24 repeating layers to GPU
llm_load_tensors: offloading output layer to GPU
llm_load_tensors: offloaded 25/25 layers to GPU
llm_load_tensors: CPU_Mapped model buffer size = 137.94 MiB
llm_load_tensors: CUDA0 model buffer size = 0.10 MiB
llm_load_tensors: CUDA1 model buffer size = 0.09 MiB
llm_load_tensors: CUDA2 model buffer size = 0.08 MiB
llm_load_tensors: CUDA0_Split model buffer size = 136.00 MiB
llm_load_tensors: CUDA1_Split model buffer size = 120.89 MiB
llm_load_tensors: CUDA2_Split model buffer size = 243.72 MiB
...........................................................
llama_new_context_with_model: n_seq_max = 1
llama_new_context_with_model: n_ctx = 8192
llama_new_context_with_model: n_ctx_per_seq = 8192
llama_new_context_with_model: n_batch = 2048
llama_new_context_with_model: n_ubatch = 512
llama_new_context_with_model: flash_attn = 1
llama_new_context_with_model: freq_base = 1000000.0
llama_new_context_with_model: freq_scale = 1
llama_new_context_with_model: n_ctx_per_seq (8192) < n_ctx_train (32768) -- the full capacity of the model will not be utilized
llama_kv_cache_init: CUDA0 KV buffer size = 36.00 MiB
llama_kv_cache_init: CUDA1 KV buffer size = 32.00 MiB
llama_kv_cache_init: CUDA2 KV buffer size = 28.00 MiB
llama_new_context_with_model: KV self size = 96.00 MiB, K (f16): 48.00 MiB, V (f16): 48.00 MiB
llama_new_context_with_model: CUDA_Host output buffer size = 0.58 MiB
llama_new_context_with_model: CUDA0 compute buffer size = 40.50 MiB
llama_new_context_with_model: CUDA1 compute buffer size = 32.25 MiB
llama_new_context_with_model: CUDA2 compute buffer size = 298.50 MiB
llama_new_context_with_model: CUDA_Host compute buffer size = 17.76 MiB
llama_new_context_with_model: graph nodes = 751
llama_new_context_with_model: graph splits = 4
common_init_from_params: warming up the model with an empty run - please wait ... (--no-warmup to disable)
ggml/src/ggml-cuda.cu:70: CUDA error
CUDA error: invalid resource handle
  current device: 0, in function ggml_cuda_op_mul_mat at ggml/src/ggml-cuda.cu:1520
  cudaEventRecord(src0_extra->events[ctx.device][0], ctx.stream())
Could not attach to process. If your uid matches the uid of the target process, check the setting of /proc/sys/kernel/yama/ptrace_scope, or try again as the root user. For more details, see /etc/sysctl.d/10-ptrace.conf
ptrace: Operation not permitted.
No stack.
The program is not being run.
Aborted (core dumped)
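
Note: the abort above produced no backtrace because Yama's ptrace restriction blocked the debugger attach ("ptrace: Operation not permitted."). A minimal sketch for capturing one on a re-run, assuming Ubuntu's default Yama configuration (the sysctl knob is the one the crash message itself points at):

# Temporarily allow same-uid ptrace attach (resets on reboot), so that
# llama.cpp's crash handler can attach gdb and print a stack trace:
sudo sysctl -w kernel.yama.ptrace_scope=0

# Then re-run the llama-speculative command above; on the next abort the
# handler should print a backtrace instead of "ptrace: Operation not permitted."

Alternatively, launching the same command directly under gdb (gdb --args ./LLM/llama.cpp/llama-speculative ...) avoids changing the sysctl at all.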