
Commit b7353b2

chore: remove comment on predefined file
1 parent bada7e3 commit b7353b2


1 file changed: +10 -10 lines changed


web-app/src/lib/predefined.ts

Lines changed: 10 additions & 10 deletions
@@ -1,6 +1,6 @@
 export const modelSettings = {
   ctx_len: {
-    key: 'ctx_len', // ctx_size
+    key: 'ctx_len',
     title: 'Context Size',
     description: 'Size of the prompt context (0 = loaded from model).',
     controller_type: 'input',
@@ -11,7 +11,7 @@ export const modelSettings = {
     },
   },
   ngl: {
-    key: 'ngl', // n_gpu_layers
+    key: 'ngl',
     title: 'GPU Layers',
     description:
       'Number of model layers to offload to the GPU (-1 for all layers, 0 for CPU only).',
@@ -24,7 +24,7 @@ export const modelSettings = {
   },

   temperature: {
-    key: 'temperature', // temp
+    key: 'temperature',
     title: 'Temperature',
     description:
       'Temperature for sampling (higher = more random). This is the default setting on load and can be overridden by the assistant settings.',
@@ -38,7 +38,7 @@ export const modelSettings = {
     },
   },
   top_k: {
-    key: 'top_k', // top-k
+    key: 'top_k',
     title: 'Top K',
     description:
       'Top-K sampling (0 = disabled). This is the default setting on load and can be overridden by the assistant settings.',
@@ -50,7 +50,7 @@ export const modelSettings = {
     },
   },
   top_p: {
-    key: 'top_p', // top-p
+    key: 'top_p',
     title: 'Top P',
     description:
       'Top-P sampling (1.0 = disabled). This is the default setting on load and can be overridden by the assistant settings.',
@@ -62,7 +62,7 @@ export const modelSettings = {
     },
   },
   min_p: {
-    key: 'min_p', // min-p
+    key: 'min_p',
     title: 'Min P',
     description:
       'Min-P sampling (0.0 = disabled). This is the default setting on load and can be overridden by the assistant settings.',
@@ -74,7 +74,7 @@ export const modelSettings = {
     },
   },
   repeat_last_n: {
-    key: 'repeat_last_n', //repeat-last-n
+    key: 'repeat_last_n',
     title: 'Repeat Last N',
     description:
       'Number of tokens to consider for repeat penalty (0 = disabled, -1 = ctx_size). This is the default setting on load and can be overridden by the assistant settings.',
@@ -86,7 +86,7 @@ export const modelSettings = {
     },
   },
   repeat_penalty: {
-    key: 'repeat_penalty', // repeat-penalty
+    key: 'repeat_penalty',
     title: 'Repeat Penalty',
     description:
       'Penalize repeating token sequences (1.0 = disabled). This is the default setting on load and can be overridden by the assistant settings.',
@@ -98,7 +98,7 @@ export const modelSettings = {
     },
   },
   presence_penalty: {
-    key: 'presence_penalty', // presence-penalty
+    key: 'presence_penalty',
     title: 'Presence Penalty',
     description:
       'Repeat alpha presence penalty (0.0 = disabled). This is the default setting on load and can be overridden by the assistant settings.',
@@ -110,7 +110,7 @@ export const modelSettings = {
     },
   },
   frequency_penalty: {
-    key: 'frequency_penalty', // frequency-penalty
+    key: 'frequency_penalty',
     title: 'Frequency Penalty',
     description:
       'Repeat alpha frequency penalty (0.0 = disabled). This is the default setting on load and can be overridden by the assistant settings.',
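For reference, every entry touched by this commit follows the same shape: a key matching the llama.cpp parameter name (the deleted comments only recorded the CLI spellings such as ctx_size and n_gpu_layers), a title and description shown in the UI, and a controller_type with its props. A minimal sketch of that shape, assuming a ModelSetting interface name and controller_props contents that are not taken from predefined.ts:

// Hypothetical sketch: the interface name and the controller_props contents
// are assumptions for illustration; only key, title, description and
// controller_type appear in the diff above.
interface ModelSetting {
  key: string
  title: string
  description: string
  controller_type: string // only 'input' is visible in this diff
  controller_props: Record<string, unknown> // exact fields not shown here
}

const ctxLen: ModelSetting = {
  key: 'ctx_len', // no trailing CLI-name comment after this commit
  title: 'Context Size',
  description: 'Size of the prompt context (0 = loaded from model).',
  controller_type: 'input',
  controller_props: { value: 8192 }, // illustrative value only
}

Because the removed comments carried no runtime meaning, the change is comment-only and behavior is unaffected.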

0 commit comments
