bash  GGML_VK_PERF_LOGGER=1 GGML_VK_PERF_LOGGER_FREQUENCY=99999 ./llama-server -m /home/tipu/AI/models/ggml-org/Qwen3-Coder-30B-A3B/Qwen3-Coder-30B-A3B-Instruct-Q8_0.gguf --port 8888 --jinja --n-predict -1 --n-gpu-layers 99 --flash-attn off --temp 0.7 --top-k 20 --top-p 0.8 --min-p 0.01 --ctx-size 30768 --mlock --ubatch-size 512 --batch-size 2048 --cache-reuse 512 --cache-ram -1 --ctx-checkpoints 16 --offline --parallel 1 --kv-unified --context-shift --prio-batch 3 --prio 3 --alias Qwen3-Coder-30B-A3B --no-mmap -dio ggml_vulkan: Found 1 Vulkan devices: ggml_vulkan: 0 = AMD Radeon Graphics (RADV RENOIR) (radv) | uma: 1 | fp16: 1 | bf16: 0 | warp size: 64 | shared memory: 65536 | int dot: 0 | matrix cores: none build: 7554 (9be29e68d) with GNU 13.3.0 for Linux x86_64 system info: n_threads = 8, n_threads_batch = 8, total_threads = 16 system_info: n_threads = 8 (n_threads_batch = 8) / 16 | CPU : SSE3 = 1 | SSSE3 = 1 | AVX = 1 | AVX2 = 1 | F16C = 1 | FMA = 1 | BMI2 = 1 | LLAMAFILE = 1 | OPENMP = 1 | REPACK = 1 | Running without SSL init: using 15 threads for HTTP server start: binding port with default address family main: loading model srv load_model: loading model '/home/tipu/AI/models/ggml-org/Qwen3-Coder-30B-A3B/Qwen3-Coder-30B-A3B-Instruct-Q8_0.gguf' common_init_result: fitting params to device memory, for bugs during this step try to reproduce them with -fit off, or provide --verbose logs if the bug only occurs with -fit on llama_params_fit_impl: projected to use 37522 MiB of device memory vs. 57344 MiB of free device memory llama_params_fit_impl: will leave 18776 >= 1024 MiB of free device memory, no changes needed llama_params_fit: successfully fit params to free device memory llama_params_fit: fitting params to free memory took 0.25 seconds llama_model_load_from_file_impl: using device Vulkan0 (AMD Radeon Graphics (RADV RENOIR)) (0000:03:00.0) - 56299 MiB free llama_model_loader: loaded meta data with 44 key-value pairs and 579 tensors from /home/tipu/AI/models/ggml-org/Qwen3-Coder-30B-A3B/Qwen3-Coder-30B-A3B-Instruct-Q8_0.gguf (version GGUF V3 (latest)) llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output. llama_model_loader: - kv 0: general.architecture str = qwen3moe llama_model_loader: - kv 1: general.type str = model llama_model_loader: - kv 2: general.name str = Qwen3-Coder-30B-A3B-Instruct llama_model_loader: - kv 3: general.finetune str = Instruct llama_model_loader: - kv 4: general.basename str = Qwen3-Coder-30B-A3B-Instruct llama_model_loader: - kv 5: general.quantized_by str = Unsloth llama_model_loader: - kv 6: general.size_label str = 30B-A3B llama_model_loader: - kv 7: general.license str = apache-2.0 llama_model_loader: - kv 8: general.license.link str = https://huggingface.co/Qwen/Qwen3-Cod... llama_model_loader: - kv 9: general.repo_url str = https://huggingface.co/unsloth llama_model_loader: - kv 10: general.base_model.count u32 = 1 llama_model_loader: - kv 11: general.base_model.0.name str = Qwen3 Coder 30B A3B Instruct llama_model_loader: - kv 12: general.base_model.0.organization str = Qwen llama_model_loader: - kv 13: general.base_model.0.repo_url str = https://huggingface.co/Qwen/Qwen3-Cod... 
llama_model_loader: - kv  14: general.tags arr[str,2] = ["unsloth", "text-generation"]
llama_model_loader: - kv  15: qwen3moe.block_count u32 = 48
llama_model_loader: - kv  16: qwen3moe.context_length u32 = 262144
llama_model_loader: - kv  17: qwen3moe.embedding_length u32 = 2048
llama_model_loader: - kv  18: qwen3moe.feed_forward_length u32 = 5472
llama_model_loader: - kv  19: qwen3moe.attention.head_count u32 = 32
llama_model_loader: - kv  20: qwen3moe.attention.head_count_kv u32 = 4
llama_model_loader: - kv  21: qwen3moe.rope.freq_base f32 = 10000000.000000
llama_model_loader: - kv  22: qwen3moe.attention.layer_norm_rms_epsilon f32 = 0.000001
llama_model_loader: - kv  23: qwen3moe.expert_used_count u32 = 8
llama_model_loader: - kv  24: qwen3moe.attention.key_length u32 = 128
llama_model_loader: - kv  25: qwen3moe.attention.value_length u32 = 128
llama_model_loader: - kv  26: qwen3moe.expert_count u32 = 128
llama_model_loader: - kv  27: qwen3moe.expert_feed_forward_length u32 = 768
llama_model_loader: - kv  28: qwen3moe.expert_shared_feed_forward_length u32 = 0
llama_model_loader: - kv  29: tokenizer.ggml.model str = gpt2
llama_model_loader: - kv  30: tokenizer.ggml.pre str = qwen2
llama_model_loader: - kv  31: tokenizer.ggml.tokens arr[str,151936] = ["!", "\"", "#", "$", "%", "&", "'", ...
llama_model_loader: - kv  32: tokenizer.ggml.token_type arr[i32,151936] = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ...
llama_model_loader: - kv  33: tokenizer.ggml.merges arr[str,151387] = ["Ġ Ġ", "ĠĠ ĠĠ", "i n", "Ġ t",...
llama_model_loader: - kv  34: tokenizer.ggml.eos_token_id u32 = 151645
llama_model_loader: - kv  35: tokenizer.ggml.padding_token_id u32 = 151654
llama_model_loader: - kv  36: tokenizer.ggml.add_bos_token bool = false
llama_model_loader: - kv  37: tokenizer.chat_template str = {# Copyright 2025-present Unsloth. Ap...
llama_model_loader: - kv  38: general.quantization_version u32 = 2
llama_model_loader: - kv  39: general.file_type u32 = 7
llama_model_loader: - kv  40: quantize.imatrix.file str = Qwen3-Coder-30B-A3B-Instruct-GGUF/ima...
llama_model_loader: - kv  41: quantize.imatrix.dataset str = unsloth_calibration_Qwen3-Coder-30B-A...
llama_model_loader: - kv  42: quantize.imatrix.entries_count u32 = 384
llama_model_loader: - kv  43: quantize.imatrix.chunks_count u32 = 154
llama_model_loader: - type  f32: 241 tensors
llama_model_loader: - type q8_0: 338 tensors
print_info: file format = GGUF V3 (latest)
print_info: file type = Q8_0
print_info: file size = 30.25 GiB (8.51 BPW)
load: printing all EOG tokens:
load: - 151643 ('<|endoftext|>')
load: - 151645 ('<|im_end|>')
load: - 151662 ('<|fim_pad|>')
load: - 151663 ('<|repo_name|>')
load: - 151664 ('<|file_sep|>')
load: special tokens cache size = 26
load: token to piece cache size = 0.9311 MB
print_info: arch = qwen3moe
print_info: vocab_only = 0
print_info: no_alloc = 0
print_info: n_ctx_train = 262144
print_info: n_embd = 2048
print_info: n_embd_inp = 2048
print_info: n_layer = 48
print_info: n_head = 32
print_info: n_head_kv = 4
print_info: n_rot = 128
print_info: n_swa = 0
print_info: is_swa_any = 0
print_info: n_embd_head_k = 128
print_info: n_embd_head_v = 128
print_info: n_gqa = 8
print_info: n_embd_k_gqa = 512
print_info: n_embd_v_gqa = 512
print_info: f_norm_eps = 0.0e+00
print_info: f_norm_rms_eps = 1.0e-06
print_info: f_clamp_kqv = 0.0e+00
print_info: f_max_alibi_bias = 0.0e+00
print_info: f_logit_scale = 0.0e+00
print_info: f_attn_scale = 0.0e+00
print_info: n_ff = 5472
print_info: n_expert = 128
print_info: n_expert_used = 8
print_info: n_expert_groups = 0
print_info: n_group_used = 0
print_info: causal attn = 1
print_info: pooling type = 0
print_info: rope type = 2
print_info: rope scaling = linear
print_info: freq_base_train = 10000000.0
print_info: freq_scale_train = 1
print_info: n_ctx_orig_yarn = 262144
print_info: rope_yarn_log_mul = 0.0000
print_info: rope_finetuned = unknown
print_info: model type = 30B.A3B
print_info: model params = 30.53 B
print_info: general.name = Qwen3-Coder-30B-A3B-Instruct
print_info: n_ff_exp = 768
print_info: vocab type = BPE
print_info: n_vocab = 151936
print_info: n_merges = 151387
print_info: BOS token = 11 ','
print_info: EOS token = 151645 '<|im_end|>'
print_info: EOT token = 151645 '<|im_end|>'
print_info: PAD token = 151654 '<|vision_pad|>'
print_info: LF token = 198 'Ċ'
print_info: FIM PRE token = 151659 '<|fim_prefix|>'
print_info: FIM SUF token = 151661 '<|fim_suffix|>'
print_info: FIM MID token = 151660 '<|fim_middle|>'
print_info: FIM PAD token = 151662 '<|fim_pad|>'
print_info: FIM REP token = 151663 '<|repo_name|>'
print_info: FIM SEP token = 151664 '<|file_sep|>'
print_info: EOG token = 151643 '<|endoftext|>'
print_info: EOG token = 151645 '<|im_end|>'
print_info: EOG token = 151662 '<|fim_pad|>'
print_info: EOG token = 151663 '<|repo_name|>'
print_info: EOG token = 151664 '<|file_sep|>'
print_info: max token length = 256
load_tensors: loading model tensors, this can take a while... (mmap = false, direct_io = true)
load_tensors: offloading output layer to GPU
load_tensors: offloading 47 repeating layers to GPU
load_tensors: offloaded 49/49 layers to GPU
load_tensors: Vulkan0 model buffer size = 30658.10 MiB
load_tensors: Vulkan_Host model buffer size = 315.30 MiB
...................................................................................................
```
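As a quick sanity check on the loader output (my own arithmetic, not part of the log), the reported bits-per-weight and the split across the two model buffers are consistent with the 30.25 GiB file size:

```python
# Back-of-the-envelope check of the print_info / load_tensors figures above.
# The constants are copied from the log; the formulas are my assumptions.
file_size_gib = 30.25        # print_info: file size = 30.25 GiB
n_params      = 30.53e9      # print_info: model params = 30.53 B

bpw = file_size_gib * 1024**3 * 8 / n_params
print(f"{bpw:.2f} BPW")      # -> 8.51, matching "(8.51 BPW)"

# The same bytes, split across the two buffers load_tensors reports:
vulkan0_mib, host_mib = 30658.10, 315.30
print(f"{(vulkan0_mib + host_mib) / 1024:.2f} GiB")  # -> ~30.25 GiB
```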
```
common_init_result: added <|endoftext|> logit bias = -inf
common_init_result: added <|im_end|> logit bias = -inf
common_init_result: added <|fim_pad|> logit bias = -inf
common_init_result: added <|repo_name|> logit bias = -inf
common_init_result: added <|file_sep|> logit bias = -inf
llama_context: constructing llama_context
llama_context: n_seq_max = 1
llama_context: n_ctx = 30976
llama_context: n_ctx_seq = 30976
llama_context: n_batch = 2048
llama_context: n_ubatch = 512
llama_context: causal_attn = 1
llama_context: flash_attn = disabled
llama_context: kv_unified = true
llama_context: freq_base = 10000000.0
llama_context: freq_scale = 1
llama_context: n_ctx_seq (30976) < n_ctx_train (262144) -- the full capacity of the model will not be utilized
llama_context: Vulkan_Host output buffer size = 0.58 MiB
llama_kv_cache: Vulkan0 KV buffer size = 2904.00 MiB
llama_kv_cache: size = 2904.00 MiB ( 30976 cells, 48 layers, 1/1 seqs), K (f16): 1452.00 MiB, V (f16): 1452.00 MiB
llama_context: Vulkan0 compute buffer size = 3960.51 MiB
llama_context: Vulkan_Host compute buffer size = 66.52 MiB
llama_context: graph nodes = 3270
llama_context: graph splits = 2
common_init_from_params: warming up the model with an empty run - please wait ... (--no-warmup to disable)
srv load_model: initializing slots, n_slots = 1
slot load_model: id 0 | task -1 | new slot, n_ctx = 30976
srv load_model: prompt cache is enabled, size limit: no limit
srv load_model: use `--cache-ram 0` to disable the prompt cache
srv load_model: for more info see https://github.com/ggml-org/llama.cpp/pull/16391
srv load_model: thinking = 0
load_model: chat template, chat_template: {# Copyright 2025-present Unsloth. Apache 2.0 License. Unsloth Chat template fixes #}
{% macro render_item_list(item_list, tag_name='required') %}
{%- if item_list is defined and item_list is iterable and item_list | length > 0 %}
{%- if tag_name %}{{- '\n<' ~ tag_name ~ '>' -}}{% endif %}
{{- '[' }}
{%- for item in item_list -%}
{%- if loop.index > 1 %}{{- ", "}}{% endif -%}
{%- if item is string -%}
{{ "`" ~ item ~ "`" }}
{%- else -%}
{{ item }}
{%- endif -%}
{%- endfor -%}
{{- ']' }}
{%- if tag_name %}{{- '' -}}{% endif %}
{% endmacro %}
{%- if messages[0]["role"] == "system" %}
{%- set system_message = messages[0]["content"] %}
{%- set loop_messages = messages[1:] %}
{%- else %}
{%- set loop_messages = messages %}
{%- endif %}
{%- if not tools is defined %}
{%- set tools = [] %}
{%- endif %}
{%- if system_message is defined %}
{{- "<|im_start|>system\n" + system_message }}
{%- else %}
{%- if tools is iterable and tools | length > 0 %}
{{- "<|im_start|>system\nYou are Qwen, a helpful AI assistant that can interact with a computer to solve tasks." }}
{%- endif %}
{%- endif %}
{%- if tools is iterable and tools | length > 0 %}
{{- "\n\nYou have access to the following functions:\n\n" }}
{{- "" }}
{%- for tool in tools %}
{%- if tool.function is defined %}
{%- set tool = tool.function %}
{%- endif %}
{{- "\n\n" ~ tool.name ~ "" }}
{{- '\n' ~ (tool.description | trim) ~ '' }}
{{- '\n' }}
{%- for param_name, param_fields in tool.parameters.properties|items %}
{{- '\n' }}
{{- '\n' ~ param_name ~ '' }}
{%- if param_fields.type is defined %}
{{- '\n' ~ (param_fields.type | string) ~ '' }}
{%- endif %}
{%- if param_fields.description is defined %}
{{- '\n' ~ (param_fields.description | trim) ~ '' }}
{%- endif %}
{{- render_item_list(param_fields.enum, 'enum') }}
{%- set handled_keys = ['type', 'description', 'enum', 'required'] %}
{%- for json_key, json_value in param_fields|items %}
{%- if json_key not in handled_keys %}
{%- set normed_json_key = json_key|string %}
{%- if json_value is mapping %}
{{- '\n<' ~ normed_json_key ~ '>' ~ (json_value | tojson | safe) ~ '' }}
{%- else %}
{{- '\n<' ~ normed_json_key ~ '>' ~ (json_value | string) ~ '' }}
{%- endif %}
{%- endif %}
{%- endfor %}
{{- render_item_list(param_fields.required, 'required') }}
{{- '\n' }}
{%- endfor %}
{{- render_item_list(tool.parameters.required, 'required') }}
{{- '\n' }}
{%- if tool.return is defined %}
{%- if tool.return is mapping %}
{{- '\n' ~ (tool.return | tojson | safe) ~ '' }}
{%- else %}
{{- '\n' ~ (tool.return | string) ~ '' }}
{%- endif %}
{%- endif %}
{{- '\n' }}
{%- endfor %}
{{- "\n" }}
{{- '\n\nIf you choose to call a function ONLY reply in the following format with NO suffix:\n\n\n\n\nvalue_1\n\n\nThis is the value for the second parameter\nthat can span\nmultiple lines\n\n\n\n\n\nReminder:\n- Function calls MUST follow the specified format: an inner block must be nested within XML tags\n- Required parameters MUST be specified\n- You may provide optional reasoning for your function call in natural language BEFORE the function call, but NOT after\n- If there is no function call available, answer the question like normal with your current knowledge and do not tell the user about function calls\n' }}
{%- endif %}
{%- if system_message is defined %}
{{- '<|im_end|>\n' }}
{%- else %}
{%- if tools is iterable and tools | length > 0 %}
{{- '<|im_end|>\n' }}
{%- endif %}
{%- endif %}
{%- for message in loop_messages %}
{%- if message.role == "assistant" and message.tool_calls is defined and message.tool_calls is iterable and message.tool_calls | length > 0 %}
{{- '<|im_start|>' + message.role }}
{%- if message.content is defined and message.content is string and message.content | trim | length > 0 %}
{{- '\n' + message.content | trim + '\n' }}
{%- endif %}
{%- for tool_call in message.tool_calls %}
{%- if tool_call.function is defined %}
{%- set tool_call = tool_call.function %}
{%- endif %}
{{- '\n\n\n' }}
{%- if tool_call.arguments is defined %}
{%- for args_name, args_value in tool_call.arguments|items %}
{{- '\n' }}
{%- set args_value = args_value if args_value is string else args_value | string %}
{{- args_value }}
{{- '\n\n' }}
{%- endfor %}
{%- endif %}
{{- '\n' }}
{%- endfor %}
{{- '<|im_end|>\n' }}
{%- elif message.role == "user" or message.role == "system" or message.role == "assistant" %}
{{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}
{%- elif message.role == "tool" %}
{%- if loop.previtem and loop.previtem.role != "tool" %}
{{- '<|im_start|>user\n' }}
{%- endif %}
{{- '\n' }}
{{- message.content }}
{{- '\n\n' }}
{%- if not loop.last and loop.nextitem.role != "tool" %}
{{- '<|im_end|>\n' }}
{%- elif loop.last %}
{{- '<|im_end|>\n' }}
{%- endif %}
{%- else %}
{{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>\n' }}
{%- endif %}
{%- endfor %}
{%- if add_generation_prompt %}
{{- '<|im_start|>assistant\n' }}
{%- endif %}
{# Copyright 2025-present Unsloth. Apache 2.0 License. Unsloth Chat template fixes #}, example_format: '<|im_start|>system
You are a helpful assistant<|im_end|>
<|im_start|>user
Hello<|im_end|>
<|im_start|>assistant
Hi there<|im_end|>
<|im_start|>user
How are you?<|im_end|>
<|im_start|>assistant
'
main: model loaded
main: server is listening on http://127.0.0.1:8888
main: starting the main loop...
srv update_slots: all slots are idle
srv params_from_: Chat format: Qwen3 Coder
slot get_availabl: id 0 | task -1 | selected slot by LRU, t_last = -1
slot launch_slot_: id 0 | task -1 | sampler chain: logits -> penalties -> dry -> top-n-sigma -> top-k -> typical -> top-p -> min-p -> xtc -> temp-ext -> dist
slot launch_slot_: id 0 | task 0 | processing task
slot update_slots: id 0 | task 0 | new prompt, n_ctx_slot = 30976, n_keep = 0, task.n_tokens = 16
slot update_slots: id 0 | task 0 | n_tokens = 0, memory_seq_rm [0, end)
slot update_slots: id 0 | task 0 | prompt processing progress, n_tokens = 16, batch.n_tokens = 16, progress = 1.000000
slot update_slots: id 0 | task 0 | prompt done, n_tokens = 16, batch.n_tokens = 16
slot print_timing: id 0 | task 0 |
    prompt eval time =     734.46 ms /    16 tokens (   45.90 ms per token,    21.78 tokens per second)
           eval time =   60560.01 ms /   685 tokens (   88.41 ms per token,    11.31 tokens per second)
          total time =   61294.48 ms /   701 tokens
slot release: id 0 | task 0 | stop processing: n_tokens = 700, truncated = 0
srv update_slots: all slots are idle
srv log_server_r: request: POST /v1/chat/completions 127.0.0.1 200
^Csrv operator(): operator(): cleaning up before exit...
```
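Two of the numbers above are easy to reproduce by hand: the 2904.00 MiB KV buffer follows from the f16 K/V layout that `llama_kv_cache` reports, and the 11.31 tokens/s falls out of the `print_timing` line. A minimal sketch of that arithmetic (my assumptions, not llama.cpp code):

```python
# KV cache size, assuming the unified f16 K/V layout reported above.
n_ctx, n_layer = 30976, 48
n_embd_k_gqa   = 512            # n_head_kv (4) * n_embd_head_k (128)
bytes_f16      = 2

k_bytes = n_ctx * n_embd_k_gqa * bytes_f16 * n_layer
print(f"K:   {k_bytes / 1024**2:.2f} MiB")        # -> 1452.00 (V is identical)
print(f"K+V: {2 * k_bytes / 1024**2:.2f} MiB")    # -> 2904.00

# Generation speed from the print_timing line:
print(f"{685 / (60560.01 / 1000):.2f} tokens/s")  # -> ~11.31
```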
```
llama_memory_breakdown_print: | memory breakdown [MiB]               | total   free   self   model   context   compute   unaccounted |
llama_memory_breakdown_print: |   - Vulkan0 (Graphics (RADV RENOIR)) | 57344 = 18375 + (37522 = 30658 + 2904 + 3960) + 1445           |
llama_memory_breakdown_print: |   - Host                             |                  381 =   315 +    0 +   66                     |
----------------
Vulkan Timings:
ADD: 780 x 4.393 us = 3427.08 us
CONT: 32928 x 2.97 us = 97816.6 us
GET_ROWS: 1372 x 4.293 us = 5890.4 us
GLU: 32928 x 4.277 us = 140842 us
MUL: 94 x 133.467 us = 12545.9 us
MULTI_ADD ADD: 32928 x 4.406 us = 145098 us
MUL_MAT f16 m=128 n=16 k=256 batch=32: 48 x 100.056 us = 4802.72 us (334.699 GFLOPS/s)
MUL_MAT f16 m=256 n=16 k=128 batch=32: 48 x 124.13 us = 5958.24 us (269.261 GFLOPS/s)
MUL_MAT f32 m=128 n=16 k=2048: 47 x 181.981 us = 8553.12 us (46.0847 GFLOPS/s)
MUL_MAT q8_0 m=2048 n=16 k=4096: 48 x 668.689 us = 32097.1 us (401.386 GFLOPS/s)
MUL_MAT q8_0 m=4096 n=16 k=2048: 48 x 659.058 us = 31634.8 us (407.202 GFLOPS/s)
MUL_MAT q8_0 m=512 n=16 k=2048: 96 x 0.334 us = 32.12 us (100263 GFLOPS/s)
MUL_MAT_ADD MUL_MAT_VEC q8_0 m=2048 n=1 k=4096: 32148 x 222.829 us = 7.16353e+06 us (75.2824 GFLOPS/s)
MUL_MAT_ID q8_0 m=2048 n=128 k=768 n_expert=128 batch=2: 47 x 10624.6 us = 499356 us (75.7471 GFLOPS/s)
MUL_MAT_ID q8_0 m=2048 n=8 k=768 n_expert=128 batch=16: 47 x 4394.88 us = 206559 us (91.5591 GFLOPS/s)
MUL_MAT_ID q8_0 m=768 n=128 k=2048 n_expert=128 batch=2: 94 x 10526.6 us = 989500 us (76.4834 GFLOPS/s)
MUL_MAT_ID q8_0 m=768 n=8 k=2048 n_expert=128 batch=16: 94 x 4349.88 us = 408889 us (92.5439 GFLOPS/s)
MUL_MAT_ID_MUL MUL_MAT_ID_VEC q8_0 m=2048 n=128 k=768 n_expert=128: 1 x 4685.92 us = 4685.92 us (85.8724 GFLOPS/s)
MUL_MAT_ID_MUL MUL_MAT_ID_VEC q8_0 m=2048 n=8 k=768 n_expert=128: 32833 x 316.897 us = 1.04047e+07 us (79.3615 GFLOPS/s)
MUL_MAT_ID_VEC q8_0 m=768 n=128 k=2048 n_expert=128: 2 x 4861.38 us = 9722.76 us (82.8067 GFLOPS/s)
MUL_MAT_ID_VEC q8_0 m=768 n=8 k=2048 n_expert=128: 65666 x 323.983 us = 2.12747e+07 us (77.6573 GFLOPS/s)
MUL_MAT_VEC f16 m=128 n=1 k=256 batch=32: 11520 x 30.612 us = 352656 us (68.3727 GFLOPS/s)
MUL_MAT_VEC f16 m=128 n=1 k=512 batch=32: 12288 x 46.716 us = 574057 us (89.6936 GFLOPS/s)
MUL_MAT_VEC f16 m=128 n=1 k=768 batch=32: 9024 x 66.831 us = 603087 us (94.0779 GFLOPS/s)
MUL_MAT_VEC f16 m=128 n=2 k=256 batch=32: 48 x 127.955 us = 6141.84 us (32.7155 GFLOPS/s)
MUL_MAT_VEC f16 m=256 n=1 k=128 batch=32: 11520 x 24.281 us = 279721 us (86.0314 GFLOPS/s)
MUL_MAT_VEC f16 m=256 n=2 k=128 batch=32: 48 x 188.656 us = 9055.52 us (22.1456 GFLOPS/s)
MUL_MAT_VEC f16 m=512 n=1 k=128 batch=32: 12288 x 39.648 us = 487201 us (105.374 GFLOPS/s)
MUL_MAT_VEC f16 m=768 n=1 k=128 batch=32: 9024 x 55.247 us = 498551 us (113.433 GFLOPS/s)
MUL_MAT_VEC f32 m=128 n=1 k=2048: 32834 x 28.037 us = 920576 us (18.6951 GFLOPS/s)
MUL_MAT_VEC f32 m=128 n=2 k=2048: 47 x 27.9 us = 1311.32 us (37.5736 GFLOPS/s)
MUL_MAT_VEC q8_0 m=151936 n=1 k=2048: 686 x 7977.48 us = 5.47255e+06 us (77.9918 GFLOPS/s)
MUL_MAT_VEC q8_0 m=2048 n=1 k=4096: 684 x 221.86 us = 151752 us (75.6114 GFLOPS/s)
MUL_MAT_VEC q8_0 m=2048 n=2 k=4096: 48 x 223.284 us = 10717.6 us (150.258 GFLOPS/s)
MUL_MAT_VEC q8_0 m=4096 n=1 k=2048: 32832 x 229.88 us = 7.54743e+06 us (72.9646 GFLOPS/s)
MUL_MAT_VEC q8_0 m=4096 n=2 k=2048: 48 x 223.579 us = 10731.8 us (150.042 GFLOPS/s)
MUL_MAT_VEC q8_0 m=512 n=1 k=2048: 65664 x 22.038 us = 1.44716e+06 us (95.1336 GFLOPS/s)
MUL_MAT_VEC q8_0 m=512 n=2 k=2048: 96 x 21.27 us = 2041.92 us (197.145 GFLOPS/s)
RMS_NORM_MUL RMS_NORM(2048,1,1,1): 66352 x 5.374 us = 356620 us
RMS_NORM_MUL RMS_NORM(2048,16,1,1): 95 x 14.966 us = 1421.84 us
RMS_NORM_MUL RMS_NORM(2048,2,1,1): 95 x 7.029 us = 667.76 us
RMS_NORM_MUL_ROPE RMS_NORM(128,32,1,1): 32832 x 11.591 us = 380571 us
RMS_NORM_MUL_ROPE RMS_NORM(128,32,16,1): 48 x 65.856 us = 3161.12 us
RMS_NORM_MUL_ROPE RMS_NORM(128,32,2,1): 48 x 14.922 us = 716.28 us
RMS_NORM_MUL_ROPE_VIEW_SET_ROWS RMS_NORM(128,4,1,1): 32832 x 7.34 us = 241009 us
RMS_NORM_MUL_ROPE_VIEW_SET_ROWS RMS_NORM(128,4,16,1): 48 x 14.377 us = 690.12 us
RMS_NORM_MUL_ROPE_VIEW_SET_ROWS RMS_NORM(128,4,2,1): 48 x 11.031 us = 529.52 us
SET_ROWS: 32928 x 0.486 us = 16033.5 us
SOFT_MAX: 32928 x 10.865 us = 357788 us
TOPK_MOE_EARLY_SOFTMAX_NORM SOFT_MAX: 32928 x 6.266 us = 206346 us
Total time: 6.13906e+07 us.
```
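The per-shape GFLOPS figures look consistent with counting 2*m*n*k FLOPs per matrix multiply per invocation; that formula is my assumption, checked only numerically against the log. Reproducing one line as an example:

```python
# Reproduce one perf-logger line: MUL_MAT_VEC q8_0 m=2048 n=1 k=4096,
# 221.86 us average per invocation. FLOP count 2*m*n*k is an assumption.
m, n, k = 2048, 1, 4096
t_us    = 221.86

gflops = 2 * m * n * k / (t_us * 1e-6) / 1e9
print(f"{gflops:.2f} GFLOPS/s")   # -> ~75.62, vs. 75.6114 in the log
```

The grand total also lines up: 6.13906e+07 us (~61.4 s) of accumulated GPU time essentially matches the 61294.48 ms wall time from `print_timing`, and the four largest MoE/FFN mat-vec entries alone account for roughly 4.6e+07 us of it.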