main.exe -m ..\models\7b\zephyr-7b-alpha.Q8_0.gguf -ngl 2000000 -c 4096 -p "<|user|>Write a long story about llama\n<|assistant|>" --temp 0 -n 1024 -s 41
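
Flag rundown for the command above (all are standard llama.cpp main options): -m selects the GGUF model file; -ngl 2000000 requests far more GPU layers than the model has, a common shorthand for "offload everything"; -c 4096 sets the context window; -p supplies the prompt in Zephyr's chat template (cmd.exe passes the \n through literally, which is why it shows up verbatim in the echoed prompt below); --temp 0 makes decoding greedy and deterministic; -n 1024 caps generation at 1024 tokens; -s 41 fixes the seed, which has no effect at temperature 0.
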
Log start
main: build = 1336 (9ca79d5)
main: built with MSVC 19.35.32217.1 for x64
main: seed = 41
ggml_init_cublas: found 1 CUDA devices:
Device 0: NVIDIA GeForce RTX 3090, compute capability 8.6
llama_model_loader: loaded meta data with 21 key-value pairs and 291 tensors from ..\models\7b\zephyr-7b-alpha.Q8_0.gguf (version GGUF V2 (latest))
llama_model_loader: - tensor 0: token_embd.weight q8_0 [ 4096, 32000, 1, 1 ]
llama_model_loader: - tensor 1: blk.0.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 2: blk.0.ffn_down.weight q8_0 [ 14336, 4096, 1, 1 ]
llama_model_loader: - tensor 3: blk.0.ffn_gate.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 4: blk.0.ffn_up.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 5: blk.0.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 6: blk.0.attn_k.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 7: blk.0.attn_output.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 8: blk.0.attn_q.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 9: blk.0.attn_v.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 10: blk.1.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 11: blk.1.ffn_down.weight q8_0 [ 14336, 4096, 1, 1 ]
llama_model_loader: - tensor 12: blk.1.ffn_gate.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 13: blk.1.ffn_up.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 14: blk.1.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 15: blk.1.attn_k.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 16: blk.1.attn_output.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 17: blk.1.attn_q.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 18: blk.1.attn_v.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 19: blk.10.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 20: blk.10.ffn_down.weight q8_0 [ 14336, 4096, 1, 1 ]
llama_model_loader: - tensor 21: blk.10.ffn_gate.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 22: blk.10.ffn_up.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 23: blk.10.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 24: blk.10.attn_k.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 25: blk.10.attn_output.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 26: blk.10.attn_q.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 27: blk.10.attn_v.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 28: blk.11.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 29: blk.11.ffn_down.weight q8_0 [ 14336, 4096, 1, 1 ]
llama_model_loader: - tensor 30: blk.11.ffn_gate.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 31: blk.11.ffn_up.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 32: blk.11.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 33: blk.11.attn_k.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 34: blk.11.attn_output.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 35: blk.11.attn_q.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 36: blk.11.attn_v.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 37: blk.12.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 38: blk.12.ffn_down.weight q8_0 [ 14336, 4096, 1, 1 ]
llama_model_loader: - tensor 39: blk.12.ffn_gate.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 40: blk.12.ffn_up.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 41: blk.12.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 42: blk.12.attn_k.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 43: blk.12.attn_output.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 44: blk.12.attn_q.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 45: blk.12.attn_v.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 46: blk.13.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 47: blk.13.ffn_down.weight q8_0 [ 14336, 4096, 1, 1 ]
llama_model_loader: - tensor 48: blk.13.ffn_gate.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 49: blk.13.ffn_up.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 50: blk.13.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 51: blk.13.attn_k.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 52: blk.13.attn_output.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 53: blk.13.attn_q.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 54: blk.13.attn_v.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 55: blk.14.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 56: blk.14.ffn_down.weight q8_0 [ 14336, 4096, 1, 1 ]
llama_model_loader: - tensor 57: blk.14.ffn_gate.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 58: blk.14.ffn_up.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 59: blk.14.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 60: blk.14.attn_k.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 61: blk.14.attn_output.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 62: blk.14.attn_q.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 63: blk.14.attn_v.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 64: blk.15.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 65: blk.15.ffn_down.weight q8_0 [ 14336, 4096, 1, 1 ]
llama_model_loader: - tensor 66: blk.15.ffn_gate.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 67: blk.15.ffn_up.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 68: blk.15.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 69: blk.15.attn_k.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 70: blk.15.attn_output.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 71: blk.15.attn_q.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 72: blk.15.attn_v.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 73: blk.16.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 74: blk.16.ffn_down.weight q8_0 [ 14336, 4096, 1, 1 ]
llama_model_loader: - tensor 75: blk.16.ffn_gate.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 76: blk.16.ffn_up.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 77: blk.16.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 78: blk.16.attn_k.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 79: blk.16.attn_output.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 80: blk.16.attn_q.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 81: blk.16.attn_v.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 82: blk.17.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 83: blk.17.ffn_down.weight q8_0 [ 14336, 4096, 1, 1 ]
llama_model_loader: - tensor 84: blk.17.ffn_gate.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 85: blk.17.ffn_up.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 86: blk.17.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 87: blk.17.attn_k.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 88: blk.17.attn_output.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 89: blk.17.attn_q.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 90: blk.17.attn_v.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 91: blk.18.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 92: blk.18.ffn_down.weight q8_0 [ 14336, 4096, 1, 1 ]
llama_model_loader: - tensor 93: blk.18.ffn_gate.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 94: blk.18.ffn_up.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 95: blk.18.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 96: blk.18.attn_k.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 97: blk.18.attn_output.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 98: blk.18.attn_q.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 99: blk.18.attn_v.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 100: blk.19.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 101: blk.19.ffn_down.weight q8_0 [ 14336, 4096, 1, 1 ]
llama_model_loader: - tensor 102: blk.19.ffn_gate.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 103: blk.19.ffn_up.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 104: blk.19.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 105: blk.19.attn_k.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 106: blk.19.attn_output.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 107: blk.19.attn_q.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 108: blk.19.attn_v.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 109: blk.2.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 110: blk.2.ffn_down.weight q8_0 [ 14336, 4096, 1, 1 ]
llama_model_loader: - tensor 111: blk.2.ffn_gate.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 112: blk.2.ffn_up.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 113: blk.2.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 114: blk.2.attn_k.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 115: blk.2.attn_output.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 116: blk.2.attn_q.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 117: blk.2.attn_v.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 118: blk.20.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 119: blk.20.ffn_down.weight q8_0 [ 14336, 4096, 1, 1 ]
llama_model_loader: - tensor 120: blk.20.ffn_gate.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 121: blk.20.ffn_up.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 122: blk.20.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 123: blk.20.attn_k.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 124: blk.20.attn_output.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 125: blk.20.attn_q.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 126: blk.20.attn_v.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 127: blk.21.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 128: blk.21.ffn_down.weight q8_0 [ 14336, 4096, 1, 1 ]
llama_model_loader: - tensor 129: blk.21.ffn_gate.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 130: blk.21.ffn_up.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 131: blk.21.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 132: blk.21.attn_k.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 133: blk.21.attn_output.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 134: blk.21.attn_q.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 135: blk.21.attn_v.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 136: blk.22.attn_k.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 137: blk.22.attn_output.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 138: blk.22.attn_q.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 139: blk.22.attn_v.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 140: blk.3.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 141: blk.3.ffn_down.weight q8_0 [ 14336, 4096, 1, 1 ]
llama_model_loader: - tensor 142: blk.3.ffn_gate.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 143: blk.3.ffn_up.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 144: blk.3.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 145: blk.3.attn_k.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 146: blk.3.attn_output.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 147: blk.3.attn_q.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 148: blk.3.attn_v.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 149: blk.4.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 150: blk.4.ffn_down.weight q8_0 [ 14336, 4096, 1, 1 ]
llama_model_loader: - tensor 151: blk.4.ffn_gate.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 152: blk.4.ffn_up.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 153: blk.4.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 154: blk.4.attn_k.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 155: blk.4.attn_output.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 156: blk.4.attn_q.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 157: blk.4.attn_v.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 158: blk.5.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 159: blk.5.ffn_down.weight q8_0 [ 14336, 4096, 1, 1 ]
llama_model_loader: - tensor 160: blk.5.ffn_gate.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 161: blk.5.ffn_up.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 162: blk.5.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 163: blk.5.attn_k.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 164: blk.5.attn_output.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 165: blk.5.attn_q.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 166: blk.5.attn_v.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 167: blk.6.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 168: blk.6.ffn_down.weight q8_0 [ 14336, 4096, 1, 1 ]
llama_model_loader: - tensor 169: blk.6.ffn_gate.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 170: blk.6.ffn_up.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 171: blk.6.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 172: blk.6.attn_k.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 173: blk.6.attn_output.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 174: blk.6.attn_q.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 175: blk.6.attn_v.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 176: blk.7.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 177: blk.7.ffn_down.weight q8_0 [ 14336, 4096, 1, 1 ]
llama_model_loader: - tensor 178: blk.7.ffn_gate.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 179: blk.7.ffn_up.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 180: blk.7.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 181: blk.7.attn_k.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 182: blk.7.attn_output.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 183: blk.7.attn_q.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 184: blk.7.attn_v.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 185: blk.8.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 186: blk.8.ffn_down.weight q8_0 [ 14336, 4096, 1, 1 ]
llama_model_loader: - tensor 187: blk.8.ffn_gate.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 188: blk.8.ffn_up.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 189: blk.8.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 190: blk.8.attn_k.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 191: blk.8.attn_output.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 192: blk.8.attn_q.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 193: blk.8.attn_v.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 194: blk.9.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 195: blk.9.ffn_down.weight q8_0 [ 14336, 4096, 1, 1 ]
llama_model_loader: - tensor 196: blk.9.ffn_gate.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 197: blk.9.ffn_up.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 198: blk.9.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 199: blk.9.attn_k.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 200: blk.9.attn_output.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 201: blk.9.attn_q.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 202: blk.9.attn_v.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 203: output.weight q8_0 [ 4096, 32000, 1, 1 ]
llama_model_loader: - tensor 204: blk.22.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 205: blk.22.ffn_down.weight q8_0 [ 14336, 4096, 1, 1 ]
llama_model_loader: - tensor 206: blk.22.ffn_gate.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 207: blk.22.ffn_up.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 208: blk.22.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 209: blk.23.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 210: blk.23.ffn_down.weight q8_0 [ 14336, 4096, 1, 1 ]
llama_model_loader: - tensor 211: blk.23.ffn_gate.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 212: blk.23.ffn_up.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 213: blk.23.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 214: blk.23.attn_k.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 215: blk.23.attn_output.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 216: blk.23.attn_q.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 217: blk.23.attn_v.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 218: blk.24.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 219: blk.24.ffn_down.weight q8_0 [ 14336, 4096, 1, 1 ]
llama_model_loader: - tensor 220: blk.24.ffn_gate.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 221: blk.24.ffn_up.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 222: blk.24.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 223: blk.24.attn_k.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 224: blk.24.attn_output.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 225: blk.24.attn_q.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 226: blk.24.attn_v.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 227: blk.25.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 228: blk.25.ffn_down.weight q8_0 [ 14336, 4096, 1, 1 ]
llama_model_loader: - tensor 229: blk.25.ffn_gate.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 230: blk.25.ffn_up.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 231: blk.25.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 232: blk.25.attn_k.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 233: blk.25.attn_output.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 234: blk.25.attn_q.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 235: blk.25.attn_v.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 236: blk.26.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 237: blk.26.ffn_down.weight q8_0 [ 14336, 4096, 1, 1 ]
llama_model_loader: - tensor 238: blk.26.ffn_gate.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 239: blk.26.ffn_up.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 240: blk.26.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 241: blk.26.attn_k.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 242: blk.26.attn_output.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 243: blk.26.attn_q.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 244: blk.26.attn_v.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 245: blk.27.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 246: blk.27.ffn_down.weight q8_0 [ 14336, 4096, 1, 1 ]
llama_model_loader: - tensor 247: blk.27.ffn_gate.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 248: blk.27.ffn_up.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 249: blk.27.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 250: blk.27.attn_k.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 251: blk.27.attn_output.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 252: blk.27.attn_q.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 253: blk.27.attn_v.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 254: blk.28.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 255: blk.28.ffn_down.weight q8_0 [ 14336, 4096, 1, 1 ]
llama_model_loader: - tensor 256: blk.28.ffn_gate.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 257: blk.28.ffn_up.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 258: blk.28.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 259: blk.28.attn_k.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 260: blk.28.attn_output.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 261: blk.28.attn_q.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 262: blk.28.attn_v.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 263: blk.29.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 264: blk.29.ffn_down.weight q8_0 [ 14336, 4096, 1, 1 ]
llama_model_loader: - tensor 265: blk.29.ffn_gate.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 266: blk.29.ffn_up.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 267: blk.29.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 268: blk.29.attn_k.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 269: blk.29.attn_output.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 270: blk.29.attn_q.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 271: blk.29.attn_v.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 272: blk.30.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 273: blk.30.ffn_down.weight q8_0 [ 14336, 4096, 1, 1 ]
llama_model_loader: - tensor 274: blk.30.ffn_gate.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 275: blk.30.ffn_up.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 276: blk.30.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 277: blk.30.attn_k.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 278: blk.30.attn_output.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 279: blk.30.attn_q.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 280: blk.30.attn_v.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 281: blk.31.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 282: blk.31.ffn_down.weight q8_0 [ 14336, 4096, 1, 1 ]
llama_model_loader: - tensor 283: blk.31.ffn_gate.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 284: blk.31.ffn_up.weight q8_0 [ 4096, 14336, 1, 1 ]
llama_model_loader: - tensor 285: blk.31.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 286: blk.31.attn_k.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 287: blk.31.attn_output.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 288: blk.31.attn_q.weight q8_0 [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 289: blk.31.attn_v.weight q8_0 [ 4096, 1024, 1, 1 ]
llama_model_loader: - tensor 290: output_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - kv 0: general.architecture str
llama_model_loader: - kv 1: general.name str
llama_model_loader: - kv 2: llama.context_length u32
llama_model_loader: - kv 3: llama.embedding_length u32
llama_model_loader: - kv 4: llama.block_count u32
llama_model_loader: - kv 5: llama.feed_forward_length u32
llama_model_loader: - kv 6: llama.rope.dimension_count u32
llama_model_loader: - kv 7: llama.attention.head_count u32
llama_model_loader: - kv 8: llama.attention.head_count_kv u32
llama_model_loader: - kv 9: llama.attention.layer_norm_rms_epsilon f32
llama_model_loader: - kv 10: llama.rope.freq_base f32
llama_model_loader: - kv 11: general.file_type u32
llama_model_loader: - kv 12: tokenizer.ggml.model str
llama_model_loader: - kv 13: tokenizer.ggml.tokens arr
llama_model_loader: - kv 14: tokenizer.ggml.scores arr
llama_model_loader: - kv 15: tokenizer.ggml.token_type arr
llama_model_loader: - kv 16: tokenizer.ggml.bos_token_id u32
llama_model_loader: - kv 17: tokenizer.ggml.eos_token_id u32
llama_model_loader: - kv 18: tokenizer.ggml.unknown_token_id u32
llama_model_loader: - kv 19: tokenizer.ggml.padding_token_id u32
llama_model_loader: - kv 20: general.quantization_version u32
llama_model_loader: - type f32: 65 tensors
llama_model_loader: - type q8_0: 226 tensors
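
Quick consistency check: the 65 f32 tensors are the two norms per layer (2 x 32) plus output_norm, and the 226 q8_0 tensors are the seven quantized matrices per layer (7 x 32) plus token_embd and output; 65 + 226 = 291, matching the tensor count reported when the file was opened.
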
llm_load_print_meta: format = GGUF V2 (latest)
llm_load_print_meta: arch = llama
llm_load_print_meta: vocab type = SPM
llm_load_print_meta: n_vocab = 32000
llm_load_print_meta: n_merges = 0
llm_load_print_meta: n_ctx_train = 32768
llm_load_print_meta: n_embd = 4096
llm_load_print_meta: n_head = 32
llm_load_print_meta: n_head_kv = 8
llm_load_print_meta: n_layer = 32
llm_load_print_meta: n_rot = 128
llm_load_print_meta: n_gqa = 4
llm_load_print_meta: f_norm_eps = 0.0e+00
llm_load_print_meta: f_norm_rms_eps = 1.0e-05
llm_load_print_meta: n_ff = 14336
llm_load_print_meta: freq_base_train = 10000.0
llm_load_print_meta: freq_scale_train = 1
llm_load_print_meta: model type = 7B
llm_load_print_meta: model ftype = mostly Q8_0
llm_load_print_meta: model params = 7.24 B
llm_load_print_meta: model size = 7.17 GiB (8.50 BPW)
llm_load_print_meta: general.name = huggingfaceh4_zephyr-7b-alpha
llm_load_print_meta: BOS token = 1 '<s>'
llm_load_print_meta: EOS token = 2 '</s>'
llm_load_print_meta: UNK token = 0 '<unk>'
llm_load_print_meta: PAD token = 2 '</s>'
llm_load_print_meta: LF token = 13 '<0x0A>'
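
The metadata block is self-consistent with the tensor shapes listed earlier. As a sketch (mine, not llama.cpp code), the headline numbers can be recomputed from the shapes alone; note that Q8_0 stores each block of 32 weights as 32 int8 values plus one f16 scale, i.e. 34 bytes per 32 weights = 8.5 bits per weight, which is where the 8.50 BPW comes from:

n_embd, n_vocab, n_ff = 4096, 32000, 14336
n_layer, n_head, n_head_kv = 32, 32, 8

print(n_head // n_head_kv)   # 4, matches n_gqa
print(n_embd // n_head)      # 128, matches n_rot

per_layer = (
    2 * n_embd * n_embd      # attn_q, attn_output  [4096 x 4096]
    + 2 * n_embd * 1024      # attn_k, attn_v       [4096 x 1024] (GQA)
    + 3 * n_embd * n_ff      # ffn_gate, ffn_up, ffn_down
    + 2 * n_embd             # attn_norm, ffn_norm  (f32)
)
params = n_layer * per_layer + 2 * n_embd * n_vocab + n_embd  # + token_embd, output, output_norm
print(f"{params / 1e9:.2f} B")                 # 7.24 B, matches model params
print(f"{params * 8.5 / 8 / 2**30:.2f} GiB")   # 7.17 GiB, matches model size
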
llm_load_tensors: ggml ctx size = 0.09 MB
llm_load_tensors: using CUDA for GPU acceleration
llm_load_tensors: mem required = 132.91 MB
llm_load_tensors: offloading 32 repeating layers to GPU
llm_load_tensors: offloading non-repeating layers to GPU
llm_load_tensors: offloaded 35/35 layers to GPU
llm_load_tensors: VRAM used: 7205.83 MB
...................................................................................................
llama_new_context_with_model: n_ctx = 4096
llama_new_context_with_model: freq_base = 10000.0
llama_new_context_with_model: freq_scale = 1
llama_kv_cache_init: offloading v cache to GPU
llama_kv_cache_init: offloading k cache to GPU
llama_kv_cache_init: VRAM kv self = 512.00 MB
llama_new_context_with_model: kv self size = 512.00 MB
llama_new_context_with_model: compute buffer total size = 293.88 MB
llama_new_context_with_model: VRAM scratch buffer: 288.00 MB
llama_new_context_with_model: total VRAM used: 8005.83 MB (model: 7205.83 MB, context: 800.00 MB)
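
The memory accounting adds up as well. With the f16 KV cache used here, K and V each hold n_ctx x n_layer x n_head_kv x head_dim elements, and the total is model weights plus KV cache plus the scratch buffer (the "context: 800.00 MB" figure is just 512 + 288). A quick check:

n_ctx, n_layer, n_head_kv, head_dim = 4096, 32, 8, 128
kv_bytes = 2 * n_ctx * n_layer * n_head_kv * head_dim * 2  # K and V, 2 bytes per f16 element
print(kv_bytes / 2**20)                    # 512.0, matches "kv self size" in MB
print(f"{7205.83 + 512.00 + 288.00:.2f}")  # 8005.83, matches "total VRAM used"
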
system_info: n_threads = 8 / 16 | AVX = 1 | AVX2 = 1 | AVX512 = 0 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | FMA = 1 | NEON = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 1 | SSSE3 = 0 | VSX = 0 |
sampling: repeat_last_n = 64, repeat_penalty = 1.100000, presence_penalty = 0.000000, frequency_penalty = 0.000000, top_k = 40, tfs_z = 1.000000, top_p = 0.950000, typical_p = 1.000000, temp = 0.000000, mirostat = 0, mirostat_lr = 0.100000, mirostat_ent = 5.000000
generate: n_ctx = 4096, n_batch = 512, n_predict = 1024, n_keep = 0
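
Since temp = 0, the sampler reduces to a plain argmax: top_k, top_p, tfs_z and typical_p have no effect, the seed is irrelevant, and only repeat_penalty = 1.1 over the last 64 tokens still shapes the output. A minimal sketch of that combination (my approximation, not llama.cpp's actual sampler code):

def pick_next(logits, recent, repeat_penalty=1.1):
    # recent = the last repeat_last_n (64) generated token ids
    out = list(logits)
    for t in set(recent):
        # llama.cpp-style penalty: shrink positive logits, push negative ones further down
        out[t] = out[t] / repeat_penalty if out[t] > 0 else out[t] * repeat_penalty
    return max(range(len(out)), key=out.__getitem__)  # temp = 0 -> greedy argmax
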
<|user|>Write a long story about llama\n<|assistant|>
<|>
Once upon a time, in the heart of the Andes Mountains, there was a herd of llamas. These majestic creatures were known for their gentle nature and their ability to withstand harsh weather conditions. They roamed freely through the rugged terrain, grazing on the lush grass that grew in abundance around them.
One day, a young llama named Llama was born into this herd. She was a beautiful creature, with soft brown fur and large, expressive eyes. From the moment she took her first steps, Llama knew that she was different from the other llamas. She had a curious nature and a thirst for knowledge that set her apart from her peers.
As Llama grew older, she began to explore the world around her. She would venture out into the mountains, seeking out new grazing grounds and learning about the different plants and flowers that grew in the region. She was fascinated by the way that these plants changed with the seasons, and she spent hours observing their growth patterns and learning how they could be used for food and medicine.
One day, while Llama was out exploring, she stumbled upon a group of humans who were camping in the mountains. At first, Llama was frightened by these strange creatures, but as she watched them from a distance, she began to realize that they were not a threat. In fact, she saw that they were kind and gentle, and that they had a deep respect for the natural world around them.
Llama approached the humans cautiously, and to her surprise, they welcomed her with open arms. They fed her and cared for her, and they taught her about the ways of the human world. Llama was fascinated by their language and their technology, but she also knew that she had a responsibility to protect her own kind.
As time went on, Llama became a bridge between the llama herd and the humans who lived in the mountains. She would lead the humans to new grazing grounds, and she would help them to understand the ways of the natural world around them. In return, the humans would provide food and shelter for Llama and her herd, and they would work together to protect the land from harm.
Llama's story became a legend in the Andes Mountains. She was known as the wise llama who had brought peace between the humans and the llamas, and she was revered by both species for her kindness and her wisdom. Llama lived a long and happy life, and she died surrounded by the love and respect of all those who knew her.
Today, the Andes Mountains are still home to many llamas, and they continue to thrive in this beautiful and rugged landscape. But they also know that they have a friend in the human world, thanks to the wisdom and kindness of Llama, the wise llama who brought peace between two worlds. [end of text]
llama_print_timings: load time = 3150.73 ms
llama_print_timings: sample time = 149.46 ms / 623 runs ( 0.24 ms per token, 4168.31 tokens per second)
llama_print_timings: prompt eval time = 115.86 ms / 23 tokens ( 5.04 ms per token, 198.52 tokens per second)
llama_print_timings: eval time = 9558.68 ms / 622 runs ( 15.37 ms per token, 65.07 tokens per second)
llama_print_timings: total time = 10518.21 ms
Log end
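
The per-token figures in llama_print_timings are just the totals divided through, which is easy to verify:

eval_ms, eval_runs = 9558.68, 622
prompt_ms, prompt_tokens = 115.86, 23
print(f"{eval_ms / eval_runs:.2f} ms/token")               # 15.37, matches eval time
print(f"{1000 * eval_runs / eval_ms:.2f} tokens/s")        # 65.07, matches eval time
print(f"{1000 * prompt_tokens / prompt_ms:.2f} tokens/s")  # 198.52, matches prompt eval time

The 623 sample runs versus 622 eval runs are also consistent: the first token is sampled from the prompt batch, so the run produced 623 tokens before hitting the model's EOS ("[end of text]"), well under the -n 1024 cap.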