repo_name (string) | dataset (string) | lang (string) | pr_id (int64) | owner (string) | reviewer (string) | diff_hunk (string) | code_review_comment (string) |
|---|---|---|---|---|---|---|---|
fuwari | github_2023 | others | 243 | saicaca | saicaca | @@ -1037,6 +1037,9 @@ packages:
'@iconify-json/material-symbols@1.2.5':
resolution: {integrity: sha512-CXG4noUsqBrbr0npZVldxlf8XGUW3LF3VEGvgk/2k+A3GQt/DM/04PsbjkO2wOI/ORP8rcaHz2pw8F2TPL+CKA==}
+ '@iconify-json/mdi@1.2.1':
+ resolution: {integrity: sha512-dSkQU78gsZV6Yxnq78+LuX7jzeFC/5NAmz7O3rh558GimGFcwMV... | Could you please remove the change here? |
fuwari | github_2023 | others | 237 | saicaca | saicaca | @@ -26,6 +27,17 @@ const className = Astro.props.class
<span class="text-50 text-sm font-medium">{formatDateToYYYYMMDD(published)}</span>
</div>
+ <!-- update date -->
+ {updated && updated.getTime() !== published.getTime() && (
+ <div class="flex items-center">
+ <div class="met... | I think we could use `material-symbols:edit-calendar-outline-rounded` here, so there's no need to add a new icon package. |
fuwari | github_2023 | others | 212 | saicaca | saicaca | @@ -1,47 +1,50 @@
-import sitemap from '@astrojs/sitemap'
-import svelte from '@astrojs/svelte'
-import tailwind from '@astrojs/tailwind'
-import swup from '@swup/astro'
-import Compress from 'astro-compress'
-import icon from 'astro-icon'
-import { defineConfig } from 'astro/config'
-import Color from 'colorjs.io'
-im... | I'm not quite sure, do these two properties exist in the Astro configuration? |
fuwari | github_2023 | others | 195 | saicaca | saicaca | @@ -151,44 +149,14 @@ const bannerOffset = bannerOffsetByPosition[siteConfig.banner.position || 'cente
<div id="page-height-extend" class="hidden h-[300vh]"></div>
</body>
</html>
+
<style is:global>
:root {
--hue: var(--configHue);
--page-width: 75rem;
}
</style>
-<style is:global define:vars={{ bann... | This line defines the CSS variable `--bannerOffset`. Removing this causes it to become undefined. I am not sure but this might be the reason why the banner isn't working. |
fuwari | github_2023 | others | 195 | saicaca | saicaca | @@ -130,8 +130,6 @@ const bannerOffset = bannerOffsetByPosition[siteConfig.banner.position || 'cente
<link rel="alternate" type="application/rss+xml" title={profileConfig.name} href={`${Astro.site}rss.xml`}/>
- <style define:vars={{ configHue }}></style> <!-- defines global css variables. This will be applied ... | Here too. I suggest keeping this style element to store the CSS variables (moving `bannerOffset` here as well). |
fuwari | github_2023 | others | 195 | saicaca | saicaca | @@ -0,0 +1,207 @@
+/* The integration's default injected base.css file */
+@tailwind base;
+@tailwind components;
+@tailwind utilities;
+
+@layer components {
+ .card-base {
+ @apply rounded-[var(--radius-large)] overflow-hidden bg-[var(--card-bg)] transition;
+ }
 h1, h2, h3, h4, h5, h6, p, a, span, l... | There are two `@layer` wrappers here. Is this a mistake? |
fuwari | github_2023 | others | 195 | saicaca | saicaca | @@ -151,17 +151,15 @@ const bannerOffset = bannerOffsetByPosition[siteConfig.banner.position || 'cente
<div id="page-height-extend" class="hidden h-[300vh]"></div>
</body>
</html>
+
<style is:global>
:root {
--hue: var(--configHue);
--page-width: 75rem;
}
</style>
-<style is:global define:vars={{ bann... | The CSS code here needs to be wrapped inside `@layer components` to ensure `lg:is-home` is properly processed, which is needed for the banner height change on the homepage. |
fuwari | github_2023 | typescript | 195 | saicaca | saicaca | @@ -15,7 +15,7 @@ export const siteConfig: SiteConfig = {
fixed: false, // Hide the theme color picker for visitors
},
banner: {
- enable: false,
+ enable: true, | Please revert the change here. |
fuwari | github_2023 | others | 183 | saicaca | saicaca | @@ -12,15 +12,16 @@ interface Props {
banner?: string
description?: string
lang?: string
+ setOGTypeArticle?: boolean;
}
-const { title, banner, description, lang } = Astro.props
+const { title, banner, description, lang, setOGTypeArticle, setOGTypeArticle } = Astro.props | `setOGTypeArticle` is declared twice here, which caused the build error. |
fuwari | github_2023 | others | 151 | saicaca | saicaca | @@ -30,21 +30,22 @@ const { Content } = await entry.render()
const { remarkPluginFrontmatter } = await entry.render()
const jsonLd = {
- '@context': 'https://schema.org',
- '@type': 'BlogPosting',
- headline: entry.data.title,
- description: entry.data.description || entry.data.title,
- keywords: entry.data.ta... | Use `siteConfig.lang` as the default value here? |
fuwari | github_2023 | others | 151 | saicaca | saicaca | @@ -41,10 +41,11 @@ const jsonLd = {
url: Astro.site,
},
datePublished: formatDateToYYYYMMDD(entry.data.published),
+ inLanguage: entry.data.language.replace('_', '-'), | My apologies for not being clear. I meant that if the `lang` in the frontmatter is not set, use the default one from `config.ts`. |
fuwari | github_2023 | typescript | 130 | saicaca | saicaca | @@ -0,0 +1,38 @@
+import Key from '../i18nKey'
+import type { Translation } from '../translation'
+
+export const ko_KR: Translation = { | I think renaming this to `ko` would be better, making it consistent with `en` and `jp`. |
fuwari | github_2023 | typescript | 130 | saicaca | saicaca | @@ -9,7 +9,7 @@ import { LinkPreset } from './types/config'
export const siteConfig: SiteConfig = {
title: 'Fuwari',
subtitle: 'Demo Site',
- lang: 'en', // 'en', 'zh_CN', 'zh_TW', 'ja'
+ lang: 'en', // 'en', 'zh_CN', 'zh_TW', 'ja', 'ko_KR' | Here as well |
fuwari | github_2023 | others | 27 | saicaca | saicaca | @@ -0,0 +1,55 @@
+# 🍥Fuwari
+
+[Astro](https://astro.build)で構築された静的ブログテンプレート
+
+[**🖥️Live Demo (Vercel)**](https://fuwari.vercel.app) / [**🌏English README**](https://github.com/saicaca/fuwari) / [**🌏中文 README**](https://github.com/saicaca/fuwari/bl... | This sentence in the EN version was just updated, could you please update the translation?
And replacing the documentation link with the JP version (https://docs.astro.build/ja/guides/deploy/) would be better. |
fuwari | github_2023 | others | 27 | saicaca | saicaca | @@ -0,0 +1,55 @@
+# 🍥Fuwari
+
+[Astro](https://astro.build)で構築された静的ブログテンプレート
+
+[**🖥️Live Demo (Vercel)**](https://fuwari.vercel.app) / [**🌏English README**](https://github.com/saicaca/fuwari) / [**🌏中文 README**](https://github.com/saicaca/fuwari/bl... | Could you please also translate the link texts here? |
fuwari | github_2023 | others | 27 | saicaca | saicaca | @@ -2,7 +2,7 @@
A static blog template built with [Astro](https://astro.build).
-[**🖥️Live Demo (Vercel)**](https://fuwari.vercel.app) / [**🌏中文 README**](https://github.com/saicaca/fuwari/blob/main/README.zh-CN.md) / [**📦Old Hexo Version**](htt... | The link texts look too long. Maybe this is better:
[**🖥️Live Demo (Vercel)**](https://fuwari.vercel.app) / [**🌏中文**](https://github.com/saicaca/fuwari/blob/main/README.zh-CN.md) [**🌏日本語**](https://github.com/saicaca/fuwari/blob/main/README.ja-JP.md) /... |
fuwari | github_2023 | typescript | 7 | saicaca | saicaca | @@ -1,30 +1,33 @@
-import {en} from "./languages/en.ts";
-import {zh_TW} from "./languages/zh_TW.ts";
-import {zh_CN} from "./languages/zh_CN.ts";
-import type I18nKey from "./i18nKey.ts";
-import {siteConfig} from "../config.ts";
+import { siteConfig } from '../config.ts'
+import type I18nKey from './i18nKey.ts'
+impo... | `languages` is unused here.
And I think it shouldn't be named `languages` since it specifies only one language. Just name it `l` or something? |
fuwari | github_2023 | others | 7 | saicaca | saicaca | @@ -1,11 +1,14 @@
-import getReadingTime from 'reading-time';
-import { toString } from 'mdast-util-to-string';
+import { mdastUtilToString } from 'mdast-util-to-string' | This breaks the word-counting.
`toString` of `mdast-util-to-string` is needed for processing markdown text. |
fuwari | github_2023 | others | 4 | saicaca | saicaca | @@ -38,9 +38,10 @@ const className = Astro.props.class;
/*&:after*/
/* background: var(--link-active)*/
code
- font-family: monospace
+ font-family: Consolas | Non-Windows systems do not necessarily have the Consolas font; `monospace` should be placed last as a fallback.
I suggest changing it to `ui-monospace,SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,monospace` |
fuwari | github_2023 | others | 4 | saicaca | saicaca | @@ -38,9 +38,10 @@ const className = Astro.props.class;
/*&:after*/
/* background: var(--link-active)*/
code
- font-family: monospace
+ font-family: Consolas
background: var(--inline-code-bg)
color: var(--inline-code-color)
+ font-size: 16px | The default font-size is indeed too small, but inline code is usually still a bit smaller than body text; I suggest changing it to `0.875rem` |
awesome-ros-deliberation | github_2023 | others | 9 | ros-wg-delib | ct2034 | @@ -9,34 +9,39 @@
Deliberation is the topmost layer in a robotics architecture sometimes also called mission or planning layer.
It aims at controlling the robots behavior towards its extended goal or function.
This includes pre-programmed state machines, automated symbolic planning as well as behavioral reaction to ... | Thanks. This sounds like a great idea |
awesome-ros-deliberation | github_2023 | others | 6 | ros-wg-delib | ct2034 | @@ -14,20 +14,21 @@ This includes pre-programmed state machines, automated symbolic planning as well
- [Packages](#packages)
- [Presentations](#presentations)
-- [Papers](#papers)
+- [Papers & Book Chapters](#papers-and-book-chapters) | ```suggestion
- [Papers and Book Chapters](#papers-and-book-chapters)
``` |
awesome-ros-deliberation | github_2023 | others | 2 | ros-wg-delib | ct2034 | @@ -5,37 +5,50 @@
[](LICENSE) [](https://github.com/ros-wg-del... | ```suggestion
Deliberation is the topmost layer in a robotics architecture sometimes also called mission or planning layer. It aims at controlling the robots behavior towards its extended goal or function. This includes pre-programmed state machines, automated symbolic planning as well as behavioral reaction to unfore... |
yen | github_2023 | others | 16 | tusharsadhwani | tusharsadhwani | @@ -0,0 +1,23 @@
+# Set yenpath variable to the user's .yen\bin directory
+$yenpath = "$env:userprofile\.yen\bin"
+
+# Create the .yen\bin directory if it doesn't exist
+if (-not (Test-Path $yenpath)) {
+ New-Item -Path $yenpath -ItemType Directory | Out-Null
+}
+
+# Download yen executable and save it to the .yen\b... | ```suggestion
Write-Host "Successfully installed yen! Restart the shell to start using the 'yen' command."
``` |
OpenRLHF | github_2023 | python | 781 | OpenRLHF | UbeCc | @@ -170,16 +170,16 @@ def fit(self, args, consumed_samples=0, num_update_steps_per_epoch=None):
dump_labels = torch.full(labels.size(), self.loss_fn.IGNORE_INDEX).to(labels.device)
for response_ranges in infos["response_ranges"]:
... | Directly masking the next token of the current message may mask the first token of the next message. Will this be negative for training?
For example, the conversation is
[
{"role": "user", "This is the 1st sentence"},
{"role": "assistant", "This is the 2nd sentence"},
{"role": "user", "This is the 3rd sentence"},
... |
OpenRLHF | github_2023 | python | 781 | OpenRLHF | UbeCc | @@ -170,16 +170,16 @@ def fit(self, args, consumed_samples=0, num_update_steps_per_epoch=None):
dump_labels = torch.full(labels.size(), self.loss_fn.IGNORE_INDEX).to(labels.device)
for response_ranges in infos["response_ranges"]:
... | BTW, here is the original code for sft: https://github.com/OpenRLHF/OpenRLHF/pull/586/files
Can you append the results of models without generating EOS? |
OpenRLHF | github_2023 | python | 781 | OpenRLHF | UbeCc | @@ -213,8 +213,8 @@ def packing_collate_fn(self, item_list):
if self.multiturn:
if len(infos["response_ranges"]) >= 1:
for i in range(len(info["response_ranges"])):
- info["response_ranges"][i][0] += infos["response_ranges"][-1][-1][1] # end_inde... | Can you provide a snippet that directly reproduces the issue? (one example is fine)
Since `infos["response_ranges"][-1][-1][1]` should already be the index of the last message, I could not find any cases that are misaligned. Thank you! |
OpenRLHF | github_2023 | python | 685 | OpenRLHF | hijkzzz | @@ -260,7 +260,7 @@ def ppo_train(self, global_steps=0):
dataloader = DataLoader(
self.replay_buffer,
batch_size=self.replay_buffer.sample_batch_size,
- shuffle=True,
+ shuffle=False, | Would it be better to disable this only when ringattn is enabled? |
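A minimal sketch of that conditional, with an illustrative `ring_attn_group` argument (presumably ring attention requires every rank to consume buffer samples in the same order, so shuffling would only be disabled in that case):

```python
from torch.utils.data import DataLoader

def make_replay_dataloader(replay_buffer, batch_size, ring_attn_group=None):
    # Keep shuffling in the common case; only an active ring-attention
    # group forces a deterministic, rank-aligned iteration order.
    return DataLoader(
        replay_buffer,
        batch_size=batch_size,
        shuffle=ring_attn_group is None,
    )
```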
OpenRLHF | github_2023 | python | 685 | OpenRLHF | TideDra | @@ -213,11 +217,27 @@ def forward(
assert return_output
return output
- log_probs = log_probs_from_logits(output["logits"][:, :-1, :], sequences[:, 1:])
-
if not self.packing_samples:
action_log_probs = log_probs[:, -num_actions:] | `log_probs` is not defined here if we don't pack samples |
OpenRLHF | github_2023 | python | 685 | OpenRLHF | TideDra | @@ -840,6 +881,17 @@ def _generate_vllm(self, all_prompts: List[str], all_labels, **kwargs) -> List[S
# num_actions.append(max(1, sum(current_action_mask)))
num_actions.append(max(1, output_len))
+ # pad seq makes the sequence a multiple of ring_attention_size.... | if ring_attn_group is None, pad_len is not defined. |
OpenRLHF | github_2023 | python | 685 | OpenRLHF | TideDra | @@ -462,6 +487,16 @@ def training_step_critic(self, experience: Experience) -> Dict[str, float]:
attention_mask = torch.cat(
[torch.full_like(s, i + 1) for i, s in enumerate(experience.sequences)], dim=0
).unsqueeze(0)
+ # pad seq makes the sequence len a multiple o... | `pad_len` is not returned here |
OpenRLHF | github_2023 | python | 797 | OpenRLHF | richardodliu | @@ -332,12 +332,12 @@ def train(args):
parser.add_argument("--kl_target", type=float, default=None)
parser.add_argument("--init_kl_coef", type=float, default=0.01, help="KL penalty in PPO")
parser.add_argument(
- "--use_kl_estimator_k3",
- action="store_true",
- default=False,
+ ... | Maybe there is another way to rename these args |
OpenRLHF | github_2023 | python | 797 | OpenRLHF | hijkzzz | @@ -439,6 +439,11 @@ def train(args):
if args.advantage_estimator in ["rloo", "reinforce_baseline", "group_norm"]:
assert args.n_samples_per_prompt > 1, f"{args.advantage_estimator} requires n_samples_per_prompt > 1"
+ if args.use_kl_loss: | remove these lines and use k1 k2 k3 as the type name
|
OpenRLHF | github_2023 | python | 797 | OpenRLHF | hijkzzz | @@ -42,6 +42,11 @@ def _validate_args(args):
actor_world_size % critic_world_size == 0
), f"actor_world_size must be divisible by critic_world_size, got {actor_world_size} and {critic_world_size}"
+ if args.use_kl_loss: | remove these lines |
OpenRLHF | github_2023 | python | 797 | OpenRLHF | hijkzzz | @@ -19,15 +19,31 @@ def compute_approx_kl(
log_probs_base: Log probabilities of the base distribution.
action_mask: Mask for actions.
"""
-
- log_ratio = log_probs.float() - log_probs_base.float()
- if action_mask is not None:
- log_ratio = log_ratio * action_mask
-
+ # The relati... | do not use the Zhihu link (it is Chinese-only) |
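For reference, the three standard per-token estimators of KL(pi || pi_ref) this function chooses between, in a hedged sketch following John Schulman's "Approximating KL Divergence" note (names are illustrative, not OpenRLHF's exact code):

```python
import torch

def approx_kl(log_probs, log_probs_base, kind="k1"):
    # log(pi / pi_ref) per token, sampled under the current policy pi
    log_ratio = log_probs.float() - log_probs_base.float()
    if kind == "k1":
        return log_ratio                      # unbiased, high variance
    if kind == "k2":
        return 0.5 * log_ratio**2             # low variance, biased
    if kind == "k3":
        # (r - 1) - log r with r = pi_ref / pi: unbiased and always >= 0
        return torch.exp(-log_ratio) - 1 + log_ratio
    raise ValueError(f"unknown estimator: {kind}")
```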
OpenRLHF | github_2023 | others | 797 | OpenRLHF | richardodliu | @@ -28,7 +28,7 @@ ray job submit --address="http://127.0.0.1:8265" \
--init_kl_coef 1e-3 \
--gamma 1.0 \
--use_kl_loss \
- --use_kl_estimator_k3 \
+ --kl_estimator_type k3 \ | parser.add_argument(
"--kl_estimator",
type=str,
choices=["k1", "k2", "k3"],
default="k1",
help="Choose kl estimation method: k1, k2, k3",
) |
OpenRLHF | github_2023 | python | 797 | OpenRLHF | richardodliu | @@ -42,6 +42,10 @@ def _validate_args(args):
actor_world_size % critic_world_size == 0
), f"actor_world_size must be divisible by critic_world_size, got {actor_world_size} and {critic_world_size}"
+ if args.use_kl_loss:
+ assert args.kl_estimate in ["k2", "k3"], f"When using KL as loss... | As we only have three choices, I think there is no need to check the "else" case |
OpenRLHF | github_2023 | python | 746 | OpenRLHF | hijkzzz | @@ -578,10 +588,6 @@ def make_experience(self, samples: Samples) -> Experience:
else:
value_ref = ray.put(None)
- if args.colocate_actor_ref: | Why remove this line? |
OpenRLHF | github_2023 | python | 746 | OpenRLHF | hijkzzz | @@ -636,26 +643,44 @@ def make_experience(self, samples: Samples) -> Experience:
if args.colocate_actor_ref or args.colocate_all_models:
torch.cuda.empty_cache()
- kl = compute_approx_kl(
- action_log_probs,
- base_action_log_probs,
- action_mask=action_ma... | Why are these lines so complicated? |
OpenRLHF | github_2023 | python | 586 | OpenRLHF | hijkzzz | @@ -131,7 +171,7 @@ def __getitem__(self, idx):
# to avoid EOS_token truncation
input_token["input_ids"][0][-1] = self.tokenizer.eos_token_id
input_token["attention_mask"][0][-1] = True
- info = {"input": prompt, "output": response, "input_length": input_token["attention_ma... | It seems we should remove input_length, as it will no longer be used? |
OpenRLHF | github_2023 | python | 514 | OpenRLHF | zhuzilin | @@ -332,7 +376,10 @@ def make_experience(self, samples: Samples) -> Experience:
@torch.no_grad()
def process_experiences(self, experiences: List[Experience]) -> List[Experience]:
# TODO: add more methods to process experiences
- return experiences
+ if self.advantage_estimator in ["grou... | I think there is a problem when `--packing_samples` is off, because in that case the tensors in each experience will be padded to different lengths. |
OpenRLHF | github_2023 | python | 524 | OpenRLHF | wuxibin89 | @@ -28,10 +28,11 @@ def __init__(self, world_size, rank, local_rank, master_addr, master_port):
os.environ["MASTER_PORT"] = str(self._master_port)
os.environ["WORLD_SIZE"] = str(self._world_size)
os.environ["RANK"] = str(self._rank)
- # NOTE: Ray will automatically set the CUDA_VISIBLE... | @HollowMan6 If PPORayActorGroup uses all the GPUs of a machine (that is, `num_gpus_per_node=8` for NVIDIA A100/H100, or `num_gpus_per_node=16` for NVIDIA L20), then LOCAL_RANK is the correct device index for this DistributedTorchRayActor.
But when `num_gpus_per_node=4`, then two PPORayActorGroup may be scheduled on same... |
OpenRLHF | github_2023 | python | 650 | OpenRLHF | Freder-chen | @@ -177,16 +177,14 @@ def fit(self, args, consumed_samples=0, num_update_steps_per_epoch=None):
self.strategy.optimizer_step(self.optimizer, self.model, self.scheduler)
acc = (chosen_reward > reject_reward).float().mean().item()
- acc_mean = acc_mean * 0.9 + 0.1 * acc
... | The `loss_sum` should be placed in the outer loop; otherwise, anomalous behavior may occur at the end of an epoch. |
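A minimal illustration of the requested structure (a sketch with made-up names, not the trainer's real code): the running statistic lives in the epoch-level loop, so a log step near the end of an epoch still averages over everything seen so far rather than a freshly reset counter.

```python
def fit(train_batches, training_step, num_epochs=1, log_interval=10):
    for epoch in range(num_epochs):
        loss_sum = 0.0                        # initialized once per epoch
        for step, batch in enumerate(train_batches, start=1):
            loss_sum += training_step(batch)  # returns a scalar loss
            if step % log_interval == 0:
                print(f"epoch {epoch} mean loss: {loss_sum / step:.4f}")
```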
OpenRLHF | github_2023 | python | 576 | OpenRLHF | zhuzilin | @@ -158,12 +161,18 @@ def packing_collate_fn(self, item_list):
index = 1
for prompt_ids_len, input_id, attention_mask, info in item_list:
packed_input_ids.append(input_id.flatten())
- packed_attention_masks.append(torch.ones_like(input_id.flatten()) * index)
+ # pack... | please remove the commented line. |
OpenRLHF | github_2023 | python | 576 | OpenRLHF | zhuzilin | @@ -151,14 +163,20 @@ def fit(self, args, consumed_samples=0, num_update_steps_per_epoch=None):
if not self.pretrain_mode:
if self.packing_samples:
index = 0
- for input_length, source_len in zip(infos["input_length"], prompts_id_lens... | We'd better not all-gather the logits, as the logits are proportional to vocab_size. Could you initialize the loss_fn with the process group, all-gather the labels, and then calculate the loss within each rank? That way, we no longer need to write the code twice for both `train` and `evaluate`. |
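A rough sketch of that suggestion under stated assumptions (each rank holds the logits for an equal, contiguous shard of the packed sequence; `ring_group` and the helper name are illustrative): gather only the integer labels, compute the local token loss, then all-reduce the sums so every rank ends up with the same mean.

```python
import torch
import torch.distributed as dist
import torch.nn.functional as F

IGNORE_INDEX = -100

def sharded_lm_loss(local_logits, full_labels, ring_group):
    # local_logits: (local_len, vocab) shard held by this rank.
    # full_labels: (total_len,) already-shifted labels; labels are cheap
    # to gather or replicate, unlike vocab-sized logits.
    rank = dist.get_rank(ring_group)
    local_len = local_logits.size(0)
    start = rank * local_len                  # assumes equal shards
    local_labels = full_labels[start : start + local_len]

    loss_sum = F.cross_entropy(
        local_logits, local_labels, ignore_index=IGNORE_INDEX, reduction="sum"
    )
    count = (local_labels != IGNORE_INDEX).sum()

    # Reduce sums, not means, so the result matches the unsharded loss.
    stats = torch.stack([loss_sum, count.to(loss_sum)])
    dist.all_reduce(stats, group=ring_group)
    return stats[0] / stats[1].clamp(min=1)
```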
OpenRLHF | github_2023 | python | 576 | OpenRLHF | zhuzilin | @@ -270,4 +303,4 @@ def evaluate(self, eval_dataloader, steps=0):
elif self._tensorboard is not None:
for k, v in logs.items():
self._tensorboard.add_scalar(f"eval/{k}", v, steps)
- self.model.train() # reset model state
+ self.model.train() ... | please restore the original newline. |
OpenRLHF | github_2023 | python | 576 | OpenRLHF | hijkzzz | @@ -13,17 +13,32 @@ class GPTLMLoss(nn.Module):
GPT Language Model Loss
"""
- def __init__(self):
+ def __init__(self, process_group=None): | Please add some annotation about the process_group for Ring Attention (maybe rename to ring_process_group )
Thanks |
OpenRLHF | github_2023 | python | 515 | OpenRLHF | hijkzzz | @@ -330,9 +330,23 @@ def make_experience(self, samples: Samples) -> Experience:
)
@torch.no_grad()
- def process_experiences(self, experiences: List[Experience]) -> List[Experience]:
- # TODO: add more methods to process experiences
- return experiences
+ def process_experiences(self... | It should be `rewards.reshape(-1, args.n_samples_per_prompt)` ? |
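For context, a small sketch of why the reshape matters, with illustrative names: when the `n_samples_per_prompt` rollouts of each prompt are stored contiguously, viewing rewards as `(-1, n_samples_per_prompt)` makes the per-prompt baseline a row-wise mean.

```python
import torch

def group_normalize(rewards, n_samples_per_prompt):
    # rewards: flat (num_prompts * n_samples_per_prompt,) tensor,
    # with samples of the same prompt adjacent.
    grouped = rewards.reshape(-1, n_samples_per_prompt)
    baseline = grouped.mean(dim=-1, keepdim=True)  # one baseline per prompt
    return (grouped - baseline).flatten()

print(group_normalize(torch.tensor([1.0, 0.0, 2.0, 4.0]), 2))
# tensor([ 0.5000, -0.5000, -1.0000,  1.0000])
```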
OpenRLHF | github_2023 | python | 536 | OpenRLHF | hijkzzz | @@ -18,7 +26,8 @@ def __init__(self, *args, **kwargs):
self.__version__ = vllm.__version__
assert self.__version__ >= "0.4.1", "OpenRLHF only supports vLLM >= 0.4.1"
- self.use_gpu_executor = kwargs["tensor_parallel_size"] == 1
+ noset_visible_devices = kwargs.pop("noset_visible_device... | Why do we need `not noset_visible_devices` here?
`use_gpu_executor` is designed for TP=1 |
OpenRLHF | github_2023 | python | 536 | OpenRLHF | hijkzzz | @@ -83,12 +92,17 @@ def create_vllm_engines(
max_model_len: int,
):
vllm_engines = []
+ # RAY_EXPERIMENTAL_NOSET_*_VISIBLE_DEVICES will always be set in current context,
+ # So we need to get env variables from ray process to check if it is set.
+ noset_visible_devices = ray_noset_visible_devices(ra... | Why do we have to get the env from the remote worker? |
OpenRLHF | github_2023 | python | 477 | OpenRLHF | zhuzilin | @@ -90,6 +91,12 @@ def blending_datasets(
train_data = data[train_split].select(range(min(max_count, len(data[train_split]))))
else:
train_data = data.select(range(min(max_count, len(data))))
+
+ # The residual data causes gradient accumulation issues in the last few st... | I'm a little confused by the issue here, as the `train_dataloader` is already set with `drop_last=True`, which should serve the same function as the code added here. |
OpenRLHF | github_2023 | python | 477 | OpenRLHF | zhuzilin | @@ -91,8 +95,7 @@ def process_data(self, data):
add_special_tokens=False,
)
prompt_ids_len = prompt_token["attention_mask"].int().sum().item()
-
- # filter the sample whose length is greater than max_length (2 for answer length)
+ # print(prompt_ids_len) | please restore the comment here. |
OpenRLHF | github_2023 | python | 477 | OpenRLHF | zhuzilin | @@ -81,6 +84,7 @@ def process_data(self, data):
self.output_key,
apply_chat_template=None if self.pretrain_mode else self.apply_chat_template,
)
+ | please remove the newline added here to make the pr clean. |
OpenRLHF | github_2023 | python | 477 | OpenRLHF | zhuzilin | @@ -1,11 +1,9 @@
from typing import Callable
-
import torch
from torch.utils.data import Dataset
from tqdm import tqdm
-
from .utils import zero_pad_sequences
-
+from torch.nn import functional as F | please run `pre-commit install` before committing, which will do auto-formatting on each commit. Also please restore the removed line here. |
OpenRLHF | github_2023 | python | 477 | OpenRLHF | zhuzilin | @@ -52,6 +52,7 @@ def train(args):
max_count=args.max_samples,
train_split=args.train_split,
eval_split=args.eval_split,
+ train_batch_size=args.train_batch_size, | This doesn't seem to be necessary, as the `blending_datasets` only serve as a function to blend datasets, and it should not require a `train_batch_size`. |
OpenRLHF | github_2023 | python | 477 | OpenRLHF | zhuzilin | @@ -64,7 +64,10 @@ def __init__(
# Parallel loading datasets
processed_dataset = dataset.map(
- self.process_data, remove_columns=dataset.column_names, num_proc=num_processors
+ self.process_data,
+ remove_columns=dataset.column_names,
+ num_proc=num_pro... | please remove the `keep_in_memory=False` which is not in the scope of this PR. If you hope to add this config, it will be nice to propose another PR that apply this change to all the `dataset.map`s. |
OpenRLHF | github_2023 | python | 477 | OpenRLHF | zhuzilin | @@ -124,15 +127,14 @@ def __getitem__(self, idx):
return_tensors="pt",
add_special_tokens=False,
)
-
if not self.pretrain_mode:
# to avoid EOS_token truncation
input_token["input_ids"][0][-1] = self.tokenizer.eos_token_id
input_token["at... | same as above. |
OpenRLHF | github_2023 | python | 477 | OpenRLHF | zhuzilin | @@ -148,23 +150,29 @@ def collate_fn(self, item_list):
input_ids = zero_pad_sequences(input_ids, "right", self.tokenizer.pad_token_id)
attention_masks = zero_pad_sequences(attention_masks, "right")
- return prompt_ids_lens, input_ids, attention_masks, infos
+ return prompt_ids_lens, in... | please keep the original code and add a new line for `packed_seq_lens`
```suggestion
packed_seq_lens = []
``` |
OpenRLHF | github_2023 | python | 477 | OpenRLHF | zhuzilin | @@ -158,28 +150,84 @@ def fit(self, args, consumed_samples=0, num_update_steps_per_epoch=None):
else:
for label, source_len in zip(labels, prompts_id_lens):
label[:source_len] = self.loss_fn.IGNORE_INDEX
+
+ if sel... | The scheduler and optimizer will be updated by deepspeed; please don't update them manually. |
OpenRLHF | github_2023 | python | 477 | OpenRLHF | zhuzilin | @@ -127,28 +127,20 @@ def fit(self, args, consumed_samples=0, num_update_steps_per_epoch=None):
# train
self.model.train()
loss_mean = 0
- for prompts_id_lens, inputs, attention_masks, infos in self.train_dataloader:
+ accumulated_loss = 0
+ accumu... | these variables should be removed? |
OpenRLHF | github_2023 | python | 477 | OpenRLHF | catqaq | @@ -158,28 +150,84 @@ def fit(self, args, consumed_samples=0, num_update_steps_per_epoch=None):
else:
for label, source_len in zip(labels, prompts_id_lens):
label[:source_len] = self.loss_fn.IGNORE_INDEX
+
+ if sel... | hi, `num_calculate_tokens` seems to be undefined? |
OpenRLHF | github_2023 | python | 477 | OpenRLHF | catqaq | @@ -127,28 +127,20 @@ def fit(self, args, consumed_samples=0, num_update_steps_per_epoch=None):
# train
self.model.train()
loss_mean = 0
- for prompts_id_lens, inputs, attention_masks, infos in self.train_dataloader:
+ accumulated_loss = 0 | as @zhuzilin mentioned, if you don't want to average within a batch, just remove it. Right now, although `accumulated_loss` is accumulated, it is never used afterward. |
OpenRLHF | github_2023 | python | 477 | OpenRLHF | zhuzilin | @@ -159,11 +148,63 @@ def fit(self, args, consumed_samples=0, num_update_steps_per_epoch=None):
for label, source_len in zip(labels, prompts_id_lens):
label[:source_len] = self.loss_fn.IGNORE_INDEX
- gpt_loss = self.loss_fn(output.logits, labels)
+ ... | the `step_bar.update()` is already called below. And I suggest not adding `torch.cuda.empty_cache()` here by default because it will hurt performance. |
OpenRLHF | github_2023 | python | 478 | OpenRLHF | catqaq | @@ -251,62 +237,69 @@ def process_experiences(self, experiences: List[Experience]) -> List[Experience]
return experiences
@torch.no_grad()
- def get_advantages_and_returns(
- self,
- values: torch.Tensor,
- rewards: torch.Tensor,
- action_mask: torch.Tensor,
- gamma... | Considering future scalability, would it be better to encapsulate PPO and RLOO in get_advantages_and_returns as separate functions? |
OpenRLHF | github_2023 | python | 478 | OpenRLHF | catqaq | @@ -251,62 +237,69 @@ def process_experiences(self, experiences: List[Experience]) -> List[Experience]
return experiences
@torch.no_grad()
- def get_advantages_and_returns(
- self,
- values: torch.Tensor,
- rewards: torch.Tensor,
- action_mask: torch.Tensor,
- gamma... | when n_samples_per_prompt=1, RLOO will be invalid. Should we directly raise an error? |
OpenRLHF | github_2023 | python | 474 | OpenRLHF | hijkzzz | @@ -438,10 +445,9 @@ def make_experience(self, prompts: Union[str, List[str]], **generate_kwargs) ->
}
if self.strategy.args.perf:
- batch_size = 1 if isinstance(prompts, str) else len(prompts)
- info["generate_time"] = torch.full((batch_size,), generate_time, device=device)
- ... | Why not record the average value? |
OpenRLHF | github_2023 | python | 442 | OpenRLHF | catqaq | @@ -254,3 +254,34 @@ def forward(self, logits: torch.Tensor, teacher_logits: torch.Tensor, label: tor
distil_loss = -torch.sum(x * mask.view(-1), dim=0) / torch.sum(mask.view(-1), dim=0)
return distil_loss
+
+
+class PRMLoss(nn.Module):
+ """
+ Process Reward Model Loss
+ """
+
+ def __i... | better to use a matrix op instead of a for loop |
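One way such a loop could collapse into a single batched op (a sketch under assumed shapes, not the PR's actual `PRMLoss`): flatten the batch and let `ignore_index` drop every position that carries no step-level label.

```python
import torch
import torch.nn.functional as F

def prm_loss_vectorized(logits, labels):
    # logits: (batch, seq, num_classes); labels: (batch, seq) filled with
    # -100 except at positions that carry a process-reward label.
    return F.cross_entropy(
        logits.reshape(-1, logits.size(-1)),
        labels.reshape(-1),
        ignore_index=-100,   # unlabeled tokens contribute nothing
    )
```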
OpenRLHF | github_2023 | python | 219 | OpenRLHF | wuxibin89 | @@ -92,7 +92,7 @@ def make_experience_batch(items: List[BufferItem]) -> Experience:
)
for key in keys:
vals = [getattr(item, key) for item in items]
- batch_data = zero_pad_sequences(vals, "left")
+ batch_data = zero_pad_sequences(vals, "right") | Should we make sequences right-padded right after generation, since we have to calculate action_logits/ref_logits/values/rewards?
```bash
>>> attention_mask = torch.Tensor([[0,1,1,0], [1,1,1,1],[1,1,1,0]])
>>> attention_mask
tensor([[0., 1., 1., 0.],
[1., 1., 1., 1.],
[1., 1., 1., 0.]])
>>> attentio... |
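A sketch of what re-padding to the right immediately after generation might look like, assuming left-padded sequences and a 0/1 attention mask (an illustrative helper, not the repo's `zero_pad_sequences`):

```python
import torch

def left_to_right_pad(sequences, attention_mask, pad_token_id=0):
    # Shift each row's valid tokens to the front; pad on the right.
    out_seq = torch.full_like(sequences, pad_token_id)
    out_mask = torch.zeros_like(attention_mask)
    for i in range(sequences.size(0)):
        keep = attention_mask[i].bool()
        n = int(keep.sum())
        out_seq[i, :n] = sequences[i, keep]
        out_mask[i, :n] = 1
    return out_seq, out_mask
```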
OpenRLHF | github_2023 | python | 183 | OpenRLHF | wuxibin89 | @@ -94,10 +94,15 @@ def get_llm_for_sequence_regression(
model_name_or_path,
config=config,
trust_remote_code=True,
- torch_dtype=torch.bfloat16 if bf16 else "auto",
+ torch_dtype="auto", | Remove `torch_dtype` in `config = AutoConfig.from_pretrained` above, or transformers will throw exeception:
```
File "/opt/tiger/ray/session_2024-01-08_17-27-40_586070_1/runtime_resources/working_dir_files/_ray_pkg_681fd5b1b4890d09/openrlhf/trainer/ray/launcher.py", line 94, in init_model_from_pretrained
model... |
OpenRLHF | github_2023 | python | 361 | OpenRLHF | hijkzzz | @@ -110,27 +110,34 @@ def train(args):
# multiple reward models
reward_pretrains = args.reward_pretrain.split(",")
reward_models = []
- for _ in reward_pretrains:
- reward_models.append(
- PPORayActorGroup(
- args.reward_num_nodes,
- args.reward_num_gpus... | More safe:
`if args.vllm_num_engines is not None and args.vllm_num_engines > 0:` |
OpenRLHF | github_2023 | python | 341 | OpenRLHF | hijkzzz | @@ -124,13 +131,34 @@ def make_experience(self, prompts: Union[str, List[str]], **generate_kwargs) ->
action_log_probs = self.actor(sequences, num_actions, attention_mask)
# init log probs
- base_action_log_probs = self.initial_model(sequences, num_actions, attention_mask)
+ if self.re... | `skip_special_tokens=True` for the Reference Model will cause a bug because the tokens may not be aligned between the actor and reference model. |
OpenRLHF | github_2023 | python | 335 | OpenRLHF | hijkzzz | @@ -29,18 +30,68 @@ def init_process_group(self, master_address, master_port, rank_offset, world_siz
f"rank={rank}, world_size={world_size}, group_name={group_name}",
)
- def update_weight(self, name, dtype, shape, empty_cache=False):
+ def update_weight(self, name, dtype, shape, empty_cac... | I'm not sure if we can get the right weights, as vLLM has sliced the weights? |
OpenRLHF | github_2023 | python | 218 | OpenRLHF | wuxibin89 | @@ -175,7 +175,10 @@ def forward(
return_output=False,
) -> torch.Tensor:
"""Returns action log probs"""
- output = self.model(sequences, attention_mask=attention_mask)
+ # https://github.com/OpenLLMAI/OpenRLHF/issues/217
+ position_ids = attention_mask.long().cumsum(-1) - 1
... | We have both left padding and right padding for sequences at the same time; is this the correct way to calculate `position_ids`?
```bash
>>> attention_mask = torch.Tensor([[0,1,1,0], [1,1,1,1],[1,1,1,0]])
>>> attention_mask
tensor([[0., 1., 1., 0.],
[1., 1., 1., 1.],
[1., 1., 1., 0.]])
>>> position_ids =... |
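For reference, the cumsum idiom in question gives each row positions that start at 0 at its first real token regardless of padding side; a common refinement additionally pins pad slots to a dummy value (a sketch, not necessarily what the PR settled on):

```python
import torch

attention_mask = torch.tensor([[0, 1, 1, 0],
                               [1, 1, 1, 1],
                               [1, 1, 1, 0]])
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)  # pad slots: harmless dummy
print(position_ids)
# tensor([[1, 0, 1, 1],
#         [0, 1, 2, 3],
#         [0, 1, 2, 1]])
```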
OpenRLHF | github_2023 | python | 202 | OpenRLHF | wuxibin89 | @@ -1,47 +1,48 @@
from typing import Callable
- | Coding style: imports usually follow this pattern:
- Python standard library
- third-party packages
- local packages
These three groups are separated by one blank line, which makes them easier to read, for example:
https://github.com/huggingface/transformers/blob/main/src/transformers/modeling_utils.py#L16-L46
|
OpenRLHF | github_2023 | python | 201 | OpenRLHF | hijkzzz | @@ -0,0 +1,226 @@
+from typing import Callable
+
+import numpy as np
+import torch
+from torch.utils.data import Dataset, DistributedSampler
+from tqdm import tqdm
+
+from .reward_dataset import RewardDataset
+from .utils import exist_and_not_none, zero_pad_sequences
+
+
+def preprocess_data(data):
+ """
+ Prepro... | Seems like we should add a condition here
```
if self.shuffle:
xxxxx
``` |
OpenRLHF | github_2023 | python | 160 | OpenRLHF | jovany-wang | @@ -16,8 +16,12 @@
from openrlhf.utils import blending_datasets, get_strategy, get_tokenizer
-def train(args):
- # sanity check
+# NOTE: reward function for multiple reward models, replace this with your own function!
+def reward_fn(rewards: List[torch.Tensor]):
+ return torch.stack(rewards).sum(dim=0)
+
+
+... | ```suggestion
def _validate_args(args):
``` |
OpenRLHF | github_2023 | others | 108 | OpenRLHF | jovany-wang | @@ -0,0 +1,35 @@
+set -x
+
+ray job submit --address="http://[fdbd:dc03:13:20a::138]:11077" \ | Should we pass the address as a parameter instead of hardcoding it? |
OpenRLHF | github_2023 | python | 108 | OpenRLHF | tsaoyu | @@ -0,0 +1,146 @@
+import argparse
+from datetime import datetime
+
+import ray
+
+from openllama2.trainer.ray import (
+ ActorModelRayActor,
+ CriticModelRayActor,
+ PPORayActorGroup,
+ ReferenceModelRayActor,
+ RewardModelRayActor,
+)
+from openllama2.utils import blending_datasets, get_strategy, get_t... | Maybe it is worthwhile to check how torchx drives the training steps. https://github.com/pytorch/torchx/blob/main/torchx/schedulers/ray/ray_driver.py#L232
A lot of `ray.get()` calls make it hard to understand where the actual Actors are running. |
OpenRLHF | github_2023 | python | 108 | OpenRLHF | tsaoyu | @@ -105,12 +115,25 @@ def __init__(
else:
self.kl_ctl = FixedKLController(init_kl_coef)
- self.experience_maker = NaiveExperienceMaker(actor, critic, reward_model, initial_model, self.kl_ctl, strategy)
+ # if reference/reward/critic models are ray actor handle, we should use Remote... | Should we also check ref. and reward model here? |
OpenRLHF | github_2023 | python | 108 | OpenRLHF | tsaoyu | @@ -186,3 +188,76 @@ def get_advantages_and_returns(
advantages = torch.stack(advantages_reversed[::-1], dim=1)
returns = advantages + values
return advantages.detach(), returns
+
+
+class RemoteExperienceMaker(NaiveExperienceMaker):
+ @torch.no_grad()
+ def make_experience(self, input_... | It is kind of a pity that, due to a limitation of Ray, we still have to move all tensors to CPU before calling the `.remote()` function |
OpenRLHF | github_2023 | python | 108 | OpenRLHF | tsaoyu | @@ -0,0 +1,195 @@
+import logging
+import os
+import socket
+from typing import Optional, Type
+
+import ray
+import torch
+from ray.util.placement_group import placement_group
+from ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy
+
+from openllama2.models import Actor, Critic, RewardModel
+from ... | Will this conflict with deepspeed? DS also manages `CUDA_VISIBLE_DEVICES` |
OpenRLHF | github_2023 | python | 86 | OpenRLHF | hijkzzz | @@ -87,10 +102,16 @@ def fit(self, use_lora):
loss = self.loss_fn(logits, labels)
self.strategy.backward(loss, self.model, self.optimizer)
self.strategy.optimizer_step(self.optimizer, self.model, self.scheduler)
-
+
step_bar.update()
+ ... | 1000 is too big, it seems that it can be changed to `self.strategy.accumulated_gradient` here |
OpenRLHF | github_2023 | python | 86 | OpenRLHF | hijkzzz | @@ -112,6 +112,19 @@ def __init__(self,
self.actor.gradient_checkpointing_enable()
self.critic.gradient_checkpointing_enable()
+ if self.strategy.args.use_wandb:
+ import wandb
+ self._wandb = wandb
+ wandb.login(key=strategy.args.use_wandb)
+ ... | It seems I did not see `self._wandb.log(xxx)` in the PPO trainer;
you could add it at line 162, after `self.strategy.print(status)` |
quenti | github_2023 | typescript | 308 | quenti-io | uncenter | @@ -64,7 +64,7 @@ export const Footer = () => {
}}
>
<IconCopyright size={12} />
- <Text fontSize="sm">2023</Text>
+ <Text fontSize="sm">2024</Text> | I mean at this point couldn't you just do:
```jsx
<Text fontSize="sm">{new Date().getFullYear()}</Text>
``` |
quenti | github_2023 | typescript | 103 | quenti-io | miapolis | @@ -64,12 +69,20 @@ export const MatchSummary = () => {
return (
<Container maxW="container.md" py="10" display="flex" alignItems="center">
<Stack spacing="6" w="full">
- <MatchSummaryFeedback
- elapsed={elapsed}
- highscore={highscore}
- highscores={leaderboard.data.hig... | Label it as "Woah! You're too fast!" or something instead |
quenti | github_2023 | typescript | 103 | quenti-io | miapolis | @@ -64,12 +69,20 @@ export const MatchSummary = () => {
return (
<Container maxW="container.md" py="10" display="flex" alignItems="center">
<Stack spacing="6" w="full">
- <MatchSummaryFeedback
- elapsed={elapsed}
- highscore={highscore}
- highscores={leaderboard.data.hig... | "Your time was too fast to record on our leaderboard." |
quenti | github_2023 | typescript | 103 | quenti-io | miapolis | @@ -32,18 +33,22 @@ export const MatchSummary = () => {
}
);
+ const isGood = elapsed > MATCH_MIN_TIME | Cringe variable name; I'd try 'processable', 'isValid', or 'storable' |
v-analyzer | github_2023 | others | 90 | v-analyzer | spytheman | @@ -142,10 +142,10 @@ This repository also contains the source code for the VS Code extension in the
folder.
It is also available via the [VS Code Marketplace](https://marketplace.visualstudio.com/items?itemName=VOSCA.vscode-v-analyzer).
-## NVIM LSP
+## NVIM LSP / Mason | What is Mason in this context? |
v-analyzer | github_2023 | others | 84 | v-analyzer | spytheman | @@ -75,10 +84,9 @@ fn build(mode ReleaseMode, explicit_debug bool) {
println('Release mode is recommended for production use. At runtime, it is about 30-40% faster than debug mode.')
}
- res := mode.compile() | given this change, .compile() is not used anymore, and can be potentially removed. |
v-analyzer | github_2023 | others | 83 | v-analyzer | spytheman | @@ -6,12 +6,13 @@ on:
branches:
- main
paths-ignore:
- - '.github/**'
- '**/test/**'
- '**/tests/**'
- - '**/*.md'
- '**/test_*.v'
- '**/*_test.v'
+ - '**/*.md'
+ - '.github/**'
+ - '!.github/nightly_release.yml' | ```suggestion
- '!.github/workflows/nightly_release.yml'
```
|
v-analyzer | github_2023 | others | 83 | v-analyzer | spytheman | @@ -59,36 +61,32 @@ jobs:
cd v-analyzer
v run build.vsh release
- - name: Create artifact
+ - name: Upload artifact
uses: actions/upload-artifact@v3
with:
- name: v-analyzer-${{ matrix.target }}
+ name: ${{ env.ARTIFACT }}
path: ./v-analyz... | That was needed, otherwise the zips will contain binaries that are not marked as executable. |
v-analyzer | github_2023 | others | 81 | v-analyzer | spytheman | @@ -24,13 +24,10 @@ jobs:
with:
check-latest: true
- - name: Install via webscript (Windows)
- if: runner.os == 'Windows'
- run: curl -o install.vsh https://raw.githubusercontent.com/v-analyzer/v-analyzer/main/install.vsh; v run install.vsh; del install.vsh
-
- - name: Inst... | No, the idea here is to test directly what people who followed the README will do.
It is one line there, so it should be one line here as well, even if it is long. |
v-analyzer | github_2023 | others | 81 | v-analyzer | spytheman | @@ -64,7 +67,6 @@ jobs:
nightly-release:
name: Create Nightly GitHub Release
- if: github.event.repository.full_name == 'v-analyzer/v-analyzer' | Why remove this line?
There is no need for other forks to create releases. |
v-analyzer | github_2023 | others | 81 | v-analyzer | spytheman | @@ -0,0 +1,39 @@
+name: Test v_tree_sitter
+
+on:
+ push:
+ paths:
+ - 'tree_sitter_v/**'
+ - 'v_tree_sitter/**'
+ pull_request:
+ paths:
+ - 'tree_sitter_v/**'
+ - 'v_tree_sitter/**'
+
+jobs:
+ test:
+ strategy:
+ matrix:
+ include:
+ - os: windows-latest
+ ... | That will just clone *the current repo* in a folder named `v_tree_sitter`. Imho there is no need for that.
`.github/workflows/analyzer_tests.yml` already does `v test .` which will run the tests in all subfolders as well. |
v-analyzer | github_2023 | others | 42 | v-analyzer | spytheman | @@ -31,39 +24,27 @@ enum ReleaseMode {
dev
}
-fn (m ReleaseMode) cmd() fn () os.Result {
- return match m {
- .release { release }
- .debug { debug }
- .dev { dev }
+fn (m ReleaseMode) cc_flags() string {
+ $if windows {
+ return '-cc gcc' // TCC cannot build tree-sitter on Windows
+ } $else $if cross_compile_... | debug should not have `-prod`; it should have `-g` instead. It also should not have `-cflags "-O3 -DNDEBUG"` (those are implied by `-prod`) |
v-analyzer | github_2023 | others | 42 | v-analyzer | spytheman | @@ -31,39 +24,27 @@ enum ReleaseMode {
dev
}
-fn (m ReleaseMode) cmd() fn () os.Result {
- return match m {
- .release { release }
- .debug { debug }
- .dev { dev }
+fn (m ReleaseMode) cc_flags() string {
+ $if windows {
+ return '-cc gcc' // TCC cannot build tree-sitter on Windows
+ } $else $if cross_compile_... | `-cflags "-O3 -DNDEBUG"` is redundant; `-prod` alone is enough |
v-analyzer | github_2023 | others | 42 | v-analyzer | spytheman | @@ -79,9 +60,7 @@ fn build(mode ReleaseMode, explicit_debug bool) {
prepare_output_dir()
println('${term.green('✓')} Prepared output directory')
- cmd := mode.cmd()
- cmd_name := mode.str()
- println('Building v-analyzer in ${term.bold(cmd_name)} mode...')
+ println('Building v-analyzer in ${term.bold(mode.str())... | shouldn't that be `mode.cmd().str()`? |
v-analyzer | github_2023 | others | 46 | v-analyzer | spytheman | @@ -11,11 +11,9 @@ import jsonrpc
import streams
import analyzer
import lsp.log
+import v.vmod
-// version is the current version of the analyzer.
-// When you release a new version, make sure to update this constant and
-// and any other places that need to be updated (use search across the project).
-const versi... | The comments do seem useful, at least for now, since `0.0.1-beta.1` is spread in other places as well. |
v-analyzer | github_2023 | others | 77 | v-analyzer | spytheman | @@ -155,8 +155,7 @@ pub fn (mut ls LanguageServer) handle_jsonrpc(request &jsonrpc.Request, mut rw j
ls.initialized(mut rw)
}
'exit' {
- // ignore for the reasons stated in the above comment
- // ls.exit()
+ ls.exit() | I think the stated reasons are the comments from 146 to 150:
```
// Note: LSP specification is unclear whether or not
// a shutdown request is allowed before server init
// but we'll just put it here since we want to formally
// shutdown the server after a certain timeout period.
```
but I also think tha... |
v-analyzer | github_2023 | others | 71 | v-analyzer | spytheman | @@ -12,33 +12,25 @@ jobs:
test:
strategy:
matrix:
- include:
- - os: ubuntu-20.04
- - os: macos-latest
+ os: [ubuntu-20.04, macos-latest, windows-latest]
+ fail-fast: false
name: Install v-analyzer on ${{ matrix.os }}
runs-on: ${{ matrix.os }}
- continu... | Use `Unix`, or `nix` here. `Nix` unfortunately has a specific meaning these days (https://en.wikipedia.org/wiki/Nix_(package_manager)) , and can be confusing, since it is a package manager, and the install.vsh script is also used for installing something. |