Skip to content

Commit e0d7b6a

Browse files
committed
feat: fix log style, etc.
1 parent 0e9219d commit e0d7b6a

File tree

2 files changed

+8
-8
lines changed

2 files changed

+8
-8
lines changed

xllm/core/framework/sampling/constrained_decoding.h

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -20,16 +20,16 @@ limitations under the License.
2020

2121
namespace xllm {
2222

23-
// constrained decoding is used to ensure that the generated content
23+
// Constrained decoding is used to ensure that the generated content
2424
// conforms to specific formats or rules.
2525
class ConstrainedDecoding {
2626
public:
2727
virtual ~ConstrainedDecoding();
2828

2929
virtual bool build_mask_cache();
3030

31-
// input generated_token_list: [sequence_num][generated_token_ids]
32-
// output: mask tensor[sequence_num,vocab_size]
31+
// Input generated_token_list: [sequence_num][generated_token_ids]
32+
// Output: mask tensor[sequence_num,vocab_size]
3333
virtual torch::Tensor generate_mask(
3434
const std::vector<std::vector<int32_t>>& generated_token_list);
3535
};

xllm/core/framework/sampling/rec_constrained_decoding.cpp

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -59,7 +59,7 @@ bool RecConstrainedDecoding::build_mask_cache() {
5959

6060
build_mask_cache_ = true;
6161

62-
LOG(INFO) << "build mask cache, first token ids size:"
62+
LOG(INFO) << "Build mask cache, first token ids size:"
6363
<< first_token_ids.size();
6464

6565
return true;
@@ -73,14 +73,14 @@ torch::Tensor RecConstrainedDecoding::generate_mask(
7373

7474
size_t token_size = generated_token_list[0].size();
7575

76-
// generate mask for first token
76+
// Generate mask for first token
7777
if (0 == token_size) {
7878
size_t sequence_num = generated_token_list.size();
7979
auto mask = first_token_mask_.unsqueeze(0);
8080
return mask.repeat({sequence_num, 1});
8181
}
8282

83-
// generate mask for non-first token
83+
// Generate mask for non-first token
8484
return generate_decode_mask(generated_token_list);
8585
}
8686

@@ -121,12 +121,12 @@ torch::Tensor RecConstrainedDecoding::generate_decode_mask(
121121
local_vocab_indices.push_back(static_cast<int64_t>(vocab_idx));
122122
}
123123
} else {
124-
LOG(ERROR) << "fail to generate mask for tokens:"
124+
LOG(ERROR) << "Fail to generate mask for tokens:"
125125
<< generated_token_list[token_idx];
126126
}
127127
}
128128

129-
// merge local results to global batch (thread-safe)
129+
// Merge local results to global batch (thread-safe)
130130
if (!local_token_indices.empty()) {
131131
std::lock_guard<std::mutex> lock(global_batch_mutex);
132132
global_batch_token_indices.insert(global_batch_token_indices.end(),

0 commit comments

Comments
 (0)